From 8c408a4b8108f076d20ce55f4fc95f85279e09a3 Mon Sep 17 00:00:00 2001 From: John Alanbrook Date: Tue, 17 Feb 2026 10:23:47 -0600 Subject: [PATCH 1/5] qbe in native build --- build.cm | 27 +- internal/os.c | 2 + meson.build | 53 +- source/qbe_backend.c | 172 + src/qbe/.gitignore | 6 + src/qbe/LICENSE | 19 + src/qbe/Makefile | 103 + src/qbe/README | 18 + src/qbe/abi.c | 25 + src/qbe/alias.c | 222 + src/qbe/all.h | 631 + src/qbe/amd64/all.h | 82 + src/qbe/amd64/emit.c | 844 + src/qbe/amd64/isel.c | 942 + src/qbe/amd64/sysv.c | 721 + src/qbe/amd64/targ.c | 67 + src/qbe/amd64/winabi.c | 763 + src/qbe/arm64/abi.c | 852 + src/qbe/arm64/all.h | 38 + src/qbe/arm64/emit.c | 679 + src/qbe/arm64/isel.c | 316 + src/qbe/arm64/targ.c | 69 + src/qbe/cfg.c | 567 + src/qbe/copy.c | 408 + src/qbe/doc/abi.txt | 141 + src/qbe/doc/il.txt | 1196 ++ src/qbe/doc/llvm.txt | 98 + src/qbe/doc/native_win.txt | 15 + src/qbe/doc/rv64.txt | 20 + src/qbe/doc/win.txt | 23 + src/qbe/emit.c | 271 + src/qbe/fold.c | 246 + src/qbe/gcm.c | 460 + src/qbe/gvn.c | 508 + src/qbe/ifopt.c | 121 + src/qbe/live.c | 144 + src/qbe/load.c | 493 + src/qbe/main.c | 212 + src/qbe/mem.c | 488 + src/qbe/minic/.gitignore | 4 + src/qbe/minic/Makefile | 12 + src/qbe/minic/mcc | 44 + src/qbe/minic/minic.y | 951 + src/qbe/minic/test/collatz.c | 33 + src/qbe/minic/test/euler9.c | 27 + src/qbe/minic/test/knight.c | 60 + src/qbe/minic/test/mandel.c | 88 + src/qbe/minic/test/prime.c | 28 + src/qbe/minic/test/queen.c | 70 + src/qbe/minic/yacc.c | 1378 ++ src/qbe/ops.h | 228 + src/qbe/out.s | 43 + src/qbe/parse.c | 1433 ++ src/qbe/rega.c | 696 + src/qbe/rv64/abi.c | 653 + src/qbe/rv64/all.h | 52 + src/qbe/rv64/emit.c | 569 + src/qbe/rv64/isel.c | 255 + src/qbe/rv64/targ.c | 57 + src/qbe/simpl.c | 124 + src/qbe/spill.c | 531 + src/qbe/ssa.c | 433 + src/qbe/test/_alt.ssa | 25 + src/qbe/test/_bf99.ssa | 2687 +++ src/qbe/test/_bfmandel.ssa | 9079 ++++++++ src/qbe/test/_chacha20.ssa | 233 + src/qbe/test/_dragon.ssa | 33 + 
src/qbe/test/_fix1.ssa | 15 + src/qbe/test/_fix2.ssa | 15 + src/qbe/test/_fix3.ssa | 20 + src/qbe/test/_fix4.ssa | 27 + src/qbe/test/_gcm1.ssa | 48 + src/qbe/test/_gcm2.ssa | 43 + src/qbe/test/_live.ssa | 21 + src/qbe/test/_load-elim.ssa | 17 + src/qbe/test/_rpo.ssa | 12 + src/qbe/test/_slow.qbe | 35762 +++++++++++++++++++++++++++++++ src/qbe/test/_spill1.ssa | 22 + src/qbe/test/_spill2.ssa | 22 + src/qbe/test/_spill3.ssa | 24 + src/qbe/test/abi1.ssa | 60 + src/qbe/test/abi2.ssa | 19 + src/qbe/test/abi3.ssa | 45 + src/qbe/test/abi4.ssa | 39 + src/qbe/test/abi5.ssa | 144 + src/qbe/test/abi6.ssa | 38 + src/qbe/test/abi7.ssa | 21 + src/qbe/test/abi8.ssa | 278 + src/qbe/test/abi9.ssa | 20 + src/qbe/test/alias1.ssa | 21 + src/qbe/test/align.ssa | 17 + src/qbe/test/cmp1.ssa | 17 + src/qbe/test/collatz.ssa | 62 + src/qbe/test/conaddr.ssa | 76 + src/qbe/test/copy.ssa | 15 + src/qbe/test/cprime.ssa | 104 + src/qbe/test/cup.ssa | 18 + src/qbe/test/dark.ssa | 32 + src/qbe/test/double.ssa | 25 + src/qbe/test/dynalloc.ssa | 27 + src/qbe/test/echo.ssa | 33 + src/qbe/test/env.ssa | 21 + src/qbe/test/eucl.ssa | 25 + src/qbe/test/euclc.ssa | 30 + src/qbe/test/fixarg.ssa | 15 + src/qbe/test/fold1.ssa | 47 + src/qbe/test/fpcnv.ssa | 134 + src/qbe/test/gvn1.ssa | 19 + src/qbe/test/gvn2.ssa | 31 + src/qbe/test/ifc.ssa | 238 + src/qbe/test/isel1.ssa | 24 + src/qbe/test/isel2.ssa | 122 + src/qbe/test/isel3.ssa | 87 + src/qbe/test/isel4.ssa | 64 + src/qbe/test/isel5.ssa | 16 + src/qbe/test/isel6.ssa | 38 + src/qbe/test/ldbits.ssa | 40 + src/qbe/test/ldhoist.ssa | 21 + src/qbe/test/load1.ssa | 27 + src/qbe/test/load2.ssa | 75 + src/qbe/test/load3.ssa | 50 + src/qbe/test/loop.ssa | 24 + src/qbe/test/mandel.ssa | 124 + src/qbe/test/max.ssa | 34 + src/qbe/test/mem1.ssa | 35 + src/qbe/test/mem2.ssa | 32 + src/qbe/test/mem3.ssa | 48 + src/qbe/test/philv.ssa | 34 + src/qbe/test/prime.ssa | 33 + src/qbe/test/puts10.ssa | 30 + src/qbe/test/queen.ssa | 282 + src/qbe/test/rega1.ssa | 24 + 
src/qbe/test/spill1.ssa | 68 + src/qbe/test/strcmp.ssa | 63 + src/qbe/test/strspn.ssa | 77 + src/qbe/test/sum.ssa | 32 + src/qbe/test/tls.ssa | 75 + src/qbe/test/vararg1.ssa | 35 + src/qbe/test/vararg2.ssa | 684 + src/qbe/tools/abi8.py | 110 + src/qbe/tools/abifuzz.sh | 107 + src/qbe/tools/callgen.ml | 535 + src/qbe/tools/cra.sh | 38 + src/qbe/tools/lexh.c | 94 + src/qbe/tools/log2.c | 64 + src/qbe/tools/mgen/.gitignore | 3 + src/qbe/tools/mgen/.ocp-indent | 1 + src/qbe/tools/mgen/Makefile | 16 + src/qbe/tools/mgen/cgen.ml | 420 + src/qbe/tools/mgen/fuzz.ml | 413 + src/qbe/tools/mgen/main.ml | 214 + src/qbe/tools/mgen/match.ml | 651 + src/qbe/tools/mgen/sexp.ml | 292 + src/qbe/tools/mgen/test.ml | 134 + src/qbe/tools/pmov.c | 262 + src/qbe/tools/test.sh | 267 + src/qbe/tools/vatest.py | 161 + src/qbe/util.c | 774 + 158 files changed, 76441 insertions(+), 20 deletions(-) create mode 100644 source/qbe_backend.c create mode 100644 src/qbe/.gitignore create mode 100644 src/qbe/LICENSE create mode 100644 src/qbe/Makefile create mode 100644 src/qbe/README create mode 100644 src/qbe/abi.c create mode 100644 src/qbe/alias.c create mode 100644 src/qbe/all.h create mode 100644 src/qbe/amd64/all.h create mode 100644 src/qbe/amd64/emit.c create mode 100644 src/qbe/amd64/isel.c create mode 100644 src/qbe/amd64/sysv.c create mode 100644 src/qbe/amd64/targ.c create mode 100755 src/qbe/amd64/winabi.c create mode 100644 src/qbe/arm64/abi.c create mode 100644 src/qbe/arm64/all.h create mode 100644 src/qbe/arm64/emit.c create mode 100644 src/qbe/arm64/isel.c create mode 100644 src/qbe/arm64/targ.c create mode 100644 src/qbe/cfg.c create mode 100644 src/qbe/copy.c create mode 100644 src/qbe/doc/abi.txt create mode 100644 src/qbe/doc/il.txt create mode 100644 src/qbe/doc/llvm.txt create mode 100644 src/qbe/doc/native_win.txt create mode 100644 src/qbe/doc/rv64.txt create mode 100644 src/qbe/doc/win.txt create mode 100644 src/qbe/emit.c create mode 100644 src/qbe/fold.c create mode 
100644 src/qbe/gcm.c create mode 100644 src/qbe/gvn.c create mode 100644 src/qbe/ifopt.c create mode 100644 src/qbe/live.c create mode 100644 src/qbe/load.c create mode 100644 src/qbe/main.c create mode 100644 src/qbe/mem.c create mode 100644 src/qbe/minic/.gitignore create mode 100644 src/qbe/minic/Makefile create mode 100755 src/qbe/minic/mcc create mode 100644 src/qbe/minic/minic.y create mode 100644 src/qbe/minic/test/collatz.c create mode 100644 src/qbe/minic/test/euler9.c create mode 100644 src/qbe/minic/test/knight.c create mode 100644 src/qbe/minic/test/mandel.c create mode 100644 src/qbe/minic/test/prime.c create mode 100644 src/qbe/minic/test/queen.c create mode 100644 src/qbe/minic/yacc.c create mode 100644 src/qbe/ops.h create mode 100644 src/qbe/out.s create mode 100644 src/qbe/parse.c create mode 100644 src/qbe/rega.c create mode 100644 src/qbe/rv64/abi.c create mode 100644 src/qbe/rv64/all.h create mode 100644 src/qbe/rv64/emit.c create mode 100644 src/qbe/rv64/isel.c create mode 100644 src/qbe/rv64/targ.c create mode 100644 src/qbe/simpl.c create mode 100644 src/qbe/spill.c create mode 100644 src/qbe/ssa.c create mode 100644 src/qbe/test/_alt.ssa create mode 100644 src/qbe/test/_bf99.ssa create mode 100644 src/qbe/test/_bfmandel.ssa create mode 100644 src/qbe/test/_chacha20.ssa create mode 100644 src/qbe/test/_dragon.ssa create mode 100644 src/qbe/test/_fix1.ssa create mode 100644 src/qbe/test/_fix2.ssa create mode 100644 src/qbe/test/_fix3.ssa create mode 100644 src/qbe/test/_fix4.ssa create mode 100644 src/qbe/test/_gcm1.ssa create mode 100644 src/qbe/test/_gcm2.ssa create mode 100644 src/qbe/test/_live.ssa create mode 100644 src/qbe/test/_load-elim.ssa create mode 100644 src/qbe/test/_rpo.ssa create mode 100644 src/qbe/test/_slow.qbe create mode 100644 src/qbe/test/_spill1.ssa create mode 100644 src/qbe/test/_spill2.ssa create mode 100644 src/qbe/test/_spill3.ssa create mode 100644 src/qbe/test/abi1.ssa create mode 100644 src/qbe/test/abi2.ssa 
create mode 100644 src/qbe/test/abi3.ssa create mode 100644 src/qbe/test/abi4.ssa create mode 100644 src/qbe/test/abi5.ssa create mode 100644 src/qbe/test/abi6.ssa create mode 100644 src/qbe/test/abi7.ssa create mode 100644 src/qbe/test/abi8.ssa create mode 100644 src/qbe/test/abi9.ssa create mode 100644 src/qbe/test/alias1.ssa create mode 100644 src/qbe/test/align.ssa create mode 100644 src/qbe/test/cmp1.ssa create mode 100644 src/qbe/test/collatz.ssa create mode 100644 src/qbe/test/conaddr.ssa create mode 100644 src/qbe/test/copy.ssa create mode 100644 src/qbe/test/cprime.ssa create mode 100644 src/qbe/test/cup.ssa create mode 100644 src/qbe/test/dark.ssa create mode 100644 src/qbe/test/double.ssa create mode 100644 src/qbe/test/dynalloc.ssa create mode 100644 src/qbe/test/echo.ssa create mode 100644 src/qbe/test/env.ssa create mode 100644 src/qbe/test/eucl.ssa create mode 100644 src/qbe/test/euclc.ssa create mode 100644 src/qbe/test/fixarg.ssa create mode 100644 src/qbe/test/fold1.ssa create mode 100644 src/qbe/test/fpcnv.ssa create mode 100644 src/qbe/test/gvn1.ssa create mode 100644 src/qbe/test/gvn2.ssa create mode 100644 src/qbe/test/ifc.ssa create mode 100644 src/qbe/test/isel1.ssa create mode 100644 src/qbe/test/isel2.ssa create mode 100644 src/qbe/test/isel3.ssa create mode 100644 src/qbe/test/isel4.ssa create mode 100644 src/qbe/test/isel5.ssa create mode 100644 src/qbe/test/isel6.ssa create mode 100644 src/qbe/test/ldbits.ssa create mode 100644 src/qbe/test/ldhoist.ssa create mode 100644 src/qbe/test/load1.ssa create mode 100644 src/qbe/test/load2.ssa create mode 100644 src/qbe/test/load3.ssa create mode 100644 src/qbe/test/loop.ssa create mode 100644 src/qbe/test/mandel.ssa create mode 100644 src/qbe/test/max.ssa create mode 100644 src/qbe/test/mem1.ssa create mode 100644 src/qbe/test/mem2.ssa create mode 100644 src/qbe/test/mem3.ssa create mode 100644 src/qbe/test/philv.ssa create mode 100644 src/qbe/test/prime.ssa create mode 100644 
src/qbe/test/puts10.ssa create mode 100644 src/qbe/test/queen.ssa create mode 100644 src/qbe/test/rega1.ssa create mode 100644 src/qbe/test/spill1.ssa create mode 100644 src/qbe/test/strcmp.ssa create mode 100644 src/qbe/test/strspn.ssa create mode 100644 src/qbe/test/sum.ssa create mode 100644 src/qbe/test/tls.ssa create mode 100644 src/qbe/test/vararg1.ssa create mode 100644 src/qbe/test/vararg2.ssa create mode 100755 src/qbe/tools/abi8.py create mode 100755 src/qbe/tools/abifuzz.sh create mode 100644 src/qbe/tools/callgen.ml create mode 100755 src/qbe/tools/cra.sh create mode 100644 src/qbe/tools/lexh.c create mode 100644 src/qbe/tools/log2.c create mode 100644 src/qbe/tools/mgen/.gitignore create mode 100644 src/qbe/tools/mgen/.ocp-indent create mode 100644 src/qbe/tools/mgen/Makefile create mode 100644 src/qbe/tools/mgen/cgen.ml create mode 100644 src/qbe/tools/mgen/fuzz.ml create mode 100644 src/qbe/tools/mgen/main.ml create mode 100644 src/qbe/tools/mgen/match.ml create mode 100644 src/qbe/tools/mgen/sexp.ml create mode 100644 src/qbe/tools/mgen/test.ml create mode 100644 src/qbe/tools/pmov.c create mode 100755 src/qbe/tools/test.sh create mode 100644 src/qbe/tools/vatest.py create mode 100644 src/qbe/util.c diff --git a/build.cm b/build.cm index 73c29df2..69d775b0 100644 --- a/build.cm +++ b/build.cm @@ -550,23 +550,17 @@ Build.compile_native = function(src_path, target, buildtype, pkg) { if (fd.is_file(dylib_path)) return dylib_path - // Step 4: Write QBE IL to temp file + // Step 4: QBE compile IR to assembly (in-process) var tmp = '/tmp/cell_native_' + hash - var ssa_path = tmp + '.ssa' var s_path = tmp + '.s' var o_path = tmp + '.o' var rt_o_path = '/tmp/cell_qbe_rt.o' - fd.slurpwrite(ssa_path, stone(blob(il))) + var asm_text = os.qbe(il) + fd.slurpwrite(s_path, stone(blob(asm_text))) - // Step 5: QBE compile to assembly - var rc = os.system('qbe -o ' + s_path + ' ' + ssa_path) - if (rc != 0) { - print('QBE compilation failed for: ' + src_path); disrupt 
- } - - // Step 6: Assemble - rc = os.system(cc + ' -c ' + s_path + ' -o ' + o_path) + // Step 5: Assemble + var rc = os.system(cc + ' -c ' + s_path + ' -o ' + o_path) if (rc != 0) { print('Assembly failed for: ' + src_path); disrupt } @@ -644,19 +638,14 @@ Build.compile_native_ir = function(optimized, src_path, opts) { return dylib_path var tmp = '/tmp/cell_native_' + hash - var ssa_path = tmp + '.ssa' var s_path = tmp + '.s' var o_path = tmp + '.o' var rt_o_path = '/tmp/cell_qbe_rt.o' - fd.slurpwrite(ssa_path, stone(blob(il))) + var asm_text = os.qbe(il) + fd.slurpwrite(s_path, stone(blob(asm_text))) - var rc = os.system('qbe -o ' + s_path + ' ' + ssa_path) - if (rc != 0) { - print('QBE compilation failed for: ' + src_path); disrupt - } - - rc = os.system(cc + ' -c ' + s_path + ' -o ' + o_path) + var rc = os.system(cc + ' -c ' + s_path + ' -o ' + o_path) if (rc != 0) { print('Assembly failed for: ' + src_path); disrupt } diff --git a/internal/os.c b/internal/os.c index 053a1ab4..7139e52b 100644 --- a/internal/os.c +++ b/internal/os.c @@ -447,6 +447,7 @@ static JSValue js_os_dylib_close(JSContext *js, JSValue self, int argc, JSValue Uses cell_rt_native_module_load from qbe_helpers.c */ extern JSValue cell_rt_native_module_load(JSContext *ctx, void *dl_handle, JSValue env); extern JSValue cell_rt_native_module_load_named(JSContext *ctx, void *dl_handle, const char *sym_name, JSValue env); +extern JSValue js_os_qbe(JSContext *, JSValue, int, JSValue *); static JSValue js_os_native_module_load(JSContext *js, JSValue self, int argc, JSValue *argv) { @@ -663,6 +664,7 @@ static const JSCFunctionListEntry js_os_funcs[] = { MIST_FUNC_DEF(os, print, 1), MIST_FUNC_DEF(os, random, 0), MIST_FUNC_DEF(os, getenv, 1), + MIST_FUNC_DEF(os, qbe, 1), }; JSValue js_core_os_use(JSContext *js) { diff --git a/meson.build b/meson.build index 55205108..a9a953c2 100644 --- a/meson.build +++ b/meson.build @@ -60,6 +60,7 @@ src += [ # core src += ['scheduler.c'] src += ['qbe_helpers.c'] +src 
+= ['qbe_backend.c'] scripts = [ 'debug/js.c', @@ -84,18 +85,67 @@ foreach file: scripts endforeach srceng = 'source' -includes = [srceng, 'internal', 'debug', 'net', 'archive'] +includes = [srceng, 'internal', 'debug', 'net', 'archive', 'src/qbe'] foreach file : src full_path = join_paths(srceng, file) sources += files(full_path) endforeach +# QBE compiler sources (all except main.c) +# Built as a separate static library to avoid -x objective-c on macOS +# (QBE uses 'Class' as a struct name, which conflicts with ObjC) +qbe_src = [ + 'src/qbe/util.c', + 'src/qbe/parse.c', + 'src/qbe/abi.c', + 'src/qbe/cfg.c', + 'src/qbe/mem.c', + 'src/qbe/ssa.c', + 'src/qbe/alias.c', + 'src/qbe/load.c', + 'src/qbe/copy.c', + 'src/qbe/fold.c', + 'src/qbe/gvn.c', + 'src/qbe/gcm.c', + 'src/qbe/simpl.c', + 'src/qbe/ifopt.c', + 'src/qbe/live.c', + 'src/qbe/spill.c', + 'src/qbe/rega.c', + 'src/qbe/emit.c', + 'src/qbe/amd64/targ.c', + 'src/qbe/amd64/sysv.c', + 'src/qbe/amd64/isel.c', + 'src/qbe/amd64/emit.c', + 'src/qbe/amd64/winabi.c', + 'src/qbe/arm64/targ.c', + 'src/qbe/arm64/abi.c', + 'src/qbe/arm64/isel.c', + 'src/qbe/arm64/emit.c', + 'src/qbe/rv64/targ.c', + 'src/qbe/rv64/abi.c', + 'src/qbe/rv64/isel.c', + 'src/qbe/rv64/emit.c', +] + +qbe_files = [] +foreach file : qbe_src + qbe_files += files(file) +endforeach + includers = [] foreach inc : includes includers += include_directories(inc) endforeach +qbe_c_args = ['-x', 'c'] +qbe_lib = static_library('qbe', + qbe_files, + include_directories: includers, + c_args: qbe_c_args, +) + if host_machine.system() == 'windows' exe_ext = '.exe' link += '-Wl,--export-all-symbols' @@ -109,6 +159,7 @@ cell_so = shared_library( sources, include_directories: includers, dependencies: deps, + link_whole: qbe_lib, install : true, ) diff --git a/source/qbe_backend.c b/source/qbe_backend.c new file mode 100644 index 00000000..cd5a4301 --- /dev/null +++ b/source/qbe_backend.c @@ -0,0 +1,172 @@ +/* + * QBE Backend — in-process QBE IR → assembly 
compilation. + * + * Wraps QBE as a library: feeds IR text via fmemopen(), captures + * assembly output via open_memstream(), returns it as a JS string. + * No subprocess, no temp files for IR, no external qbe binary needed. + */ + +#include "cell.h" +#include +#include +#include + +/* QBE headers */ +#include "all.h" +#include "config.h" + +/* QBE globals (declared extern in all.h) */ +Target T; +char debug['Z'+1] = {0}; + +extern Target T_amd64_sysv; +extern Target T_amd64_apple; +extern Target T_amd64_win; +extern Target T_arm64; +extern Target T_arm64_apple; +extern Target T_rv64; + +/* Captured output stream — set before calling parse() */ +static FILE *qbe_outf; + +static void qbe_data(Dat *d) { + emitdat(d, qbe_outf); + if (d->type == DEnd) { + fputs("/* end data */\n\n", qbe_outf); + freeall(); + } +} + +static void qbe_func(Fn *fn) { + uint n; + + T.abi0(fn); + fillcfg(fn); + filluse(fn); + promote(fn); + filluse(fn); + ssa(fn); + filluse(fn); + ssacheck(fn); + fillalias(fn); + loadopt(fn); + filluse(fn); + fillalias(fn); + coalesce(fn); + filluse(fn); + filldom(fn); + ssacheck(fn); + gvn(fn); + fillcfg(fn); + simplcfg(fn); + filluse(fn); + filldom(fn); + gcm(fn); + filluse(fn); + ssacheck(fn); + if (T.cansel) { + ifconvert(fn); + fillcfg(fn); + filluse(fn); + filldom(fn); + ssacheck(fn); + } + T.abi1(fn); + simpl(fn); + fillcfg(fn); + filluse(fn); + T.isel(fn); + fillcfg(fn); + filllive(fn); + fillloop(fn); + fillcost(fn); + spill(fn); + rega(fn); + fillcfg(fn); + simpljmp(fn); + fillcfg(fn); + assert(fn->rpo[0] == fn->start); + for (n = 0;; n++) + if (n == fn->nblk - 1) { + fn->rpo[n]->link = 0; + break; + } else + fn->rpo[n]->link = fn->rpo[n+1]; + T.emitfn(fn, qbe_outf); + fprintf(qbe_outf, "/* end function %s */\n\n", fn->name); + freeall(); +} + +static void qbe_dbgfile(char *fn) { + emitdbgfile(fn, qbe_outf); +} + +/* + * js_os_qbe(ctx, self, argc, argv) + * + * Takes a single string argument (QBE IR text). 
+ * Returns the compiled assembly as a string. + */ +JSValue js_os_qbe(JSContext *js, JSValue self, int argc, JSValue *argv) { + if (argc < 1) + return JS_ThrowTypeError(js, "os.qbe requires an IR string argument"); + + const char *ir = JS_ToCString(js, argv[0]); + if (!ir) + return JS_EXCEPTION; + + size_t ir_len = strlen(ir); + + /* Select target for host platform */ +#if defined(__APPLE__) && defined(__aarch64__) + T = T_arm64_apple; +#elif defined(__APPLE__) && defined(__x86_64__) + T = T_amd64_apple; +#elif defined(_WIN32) && defined(__x86_64__) + T = T_amd64_win; +#elif defined(__x86_64__) + T = T_amd64_sysv; +#elif defined(__aarch64__) + T = T_arm64; +#elif defined(__riscv) && __riscv_xlen == 64 + T = T_rv64; +#else + T = Deftgt; +#endif + + memset(debug, 0, sizeof(debug)); + + /* Open IR string as input FILE */ + FILE *inf = fmemopen((void *)ir, ir_len, "r"); + if (!inf) { + JS_FreeCString(js, ir); + return JS_ThrowInternalError(js, "os.qbe: fmemopen failed"); + } + + /* Open output memory stream */ + char *out_buf = NULL; + size_t out_len = 0; + qbe_outf = open_memstream(&out_buf, &out_len); + if (!qbe_outf) { + fclose(inf); + JS_FreeCString(js, ir); + return JS_ThrowInternalError(js, "os.qbe: open_memstream failed"); + } + + /* Run the QBE pipeline */ + parse(inf, "", qbe_dbgfile, qbe_data, qbe_func); + fclose(inf); + + /* Finalize (emit assembler directives) */ + T.emitfin(qbe_outf); + fflush(qbe_outf); + fclose(qbe_outf); + qbe_outf = NULL; + + JS_FreeCString(js, ir); + + /* Return assembly text */ + JSValue result = JS_NewStringLen(js, out_buf, out_len); + free(out_buf); + return result; +} diff --git a/src/qbe/.gitignore b/src/qbe/.gitignore new file mode 100644 index 00000000..09e4118b --- /dev/null +++ b/src/qbe/.gitignore @@ -0,0 +1,6 @@ +*.o +qbe +config.h +.comfile +*.out +*~ diff --git a/src/qbe/LICENSE b/src/qbe/LICENSE new file mode 100644 index 00000000..5e542f19 --- /dev/null +++ b/src/qbe/LICENSE @@ -0,0 +1,19 @@ +© 2015-2026 Quentin 
Carbonneaux + +Permission is hereby granted, free of charge, to any person obtaining a +copy of this software and associated documentation files (the "Software"), +to deal in the Software without restriction, including without limitation +the rights to use, copy, modify, merge, publish, distribute, sublicense, +and/or sell copies of the Software, and to permit persons to whom the +Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. 
diff --git a/src/qbe/Makefile b/src/qbe/Makefile new file mode 100644 index 00000000..c3bbefca --- /dev/null +++ b/src/qbe/Makefile @@ -0,0 +1,103 @@ +.POSIX: +.SUFFIXES: .o .c + +PREFIX = /usr/local +BINDIR = $(PREFIX)/bin + +COMMOBJ = main.o util.o parse.o abi.o cfg.o mem.o ssa.o alias.o load.o \ + copy.o fold.o gvn.o gcm.o simpl.o ifopt.o live.o spill.o rega.o \ + emit.o +AMD64OBJ = amd64/targ.o amd64/sysv.o amd64/isel.o amd64/emit.o amd64/winabi.o +ARM64OBJ = arm64/targ.o arm64/abi.o arm64/isel.o arm64/emit.o +RV64OBJ = rv64/targ.o rv64/abi.o rv64/isel.o rv64/emit.o +OBJ = $(COMMOBJ) $(AMD64OBJ) $(ARM64OBJ) $(RV64OBJ) + +SRCALL = $(OBJ:.o=.c) + +CC = cc +CFLAGS = -std=c99 -g -Wall -Wextra -Wpedantic + +qbe: $(OBJ) + $(CC) $(LDFLAGS) $(OBJ) -o $@ + +.c.o: + $(CC) $(CFLAGS) -c $< -o $@ + +$(OBJ): all.h ops.h +$(AMD64OBJ): amd64/all.h +$(ARM64OBJ): arm64/all.h +$(RV64OBJ): rv64/all.h +main.o: config.h + +config.h: + @case `uname` in \ + *Darwin*) \ + case `uname -m` in \ + *arm64*) \ + echo "#define Deftgt T_arm64_apple";\ + ;; \ + *) \ + echo "#define Deftgt T_amd64_apple";\ + ;; \ + esac \ + ;; \ + *) \ + case `uname -m` in \ + *aarch64*|*arm64*) \ + echo "#define Deftgt T_arm64"; \ + ;; \ + *riscv64*) \ + echo "#define Deftgt T_rv64"; \ + ;; \ + *) \ + echo "#define Deftgt T_amd64_sysv";\ + ;; \ + esac \ + ;; \ + esac > $@ + +install: qbe + mkdir -p "$(DESTDIR)$(BINDIR)" + install -m755 qbe "$(DESTDIR)$(BINDIR)/qbe" + +uninstall: + rm -f "$(DESTDIR)$(BINDIR)/qbe" + +clean: + rm -f *.o */*.o qbe + +clean-gen: clean + rm -f config.h + +check: qbe + tools/test.sh all + +check-x86_64: qbe + TARGET=x86_64 tools/test.sh all + +check-arm64: qbe + TARGET=arm64 tools/test.sh all + +check-rv64: qbe + TARGET=rv64 tools/test.sh all + +check-amd64_win: qbe + TARGET=amd64_win tools/test.sh all + +src: + @echo $(SRCALL) + +80: + @for F in $(SRCALL); \ + do \ + awk "{ \ + gsub(/\\t/, \" \"); \ + if (length(\$$0) > $@) \ + printf(\"$$F:%d: %s\\n\", NR, \$$0); \ + }" < $$F; \ + 
done + +wc: + @wc -l $(SRCALL) + +.PHONY: clean clean-gen check check-arm64 check-rv64 src 80 wc install uninstall diff --git a/src/qbe/README b/src/qbe/README new file mode 100644 index 00000000..1f51d7ce --- /dev/null +++ b/src/qbe/README @@ -0,0 +1,18 @@ +QBE - Backend Compiler http://c9x.me/compile/ + +doc/ Documentation. +minic/ An example C frontend for QBE. +tools/ Miscellaneous tools (testing). +test/ Tests. +amd64/ +arm64/ +rv64/ Architecture-specific code. + +The LICENSE file applies to all files distributed. + +- Compilation and Installation + +Invoke make in this directory to create the executable +file qbe. Install using 'make install', the standard +DESTDIR and PREFIX environment variables are supported. +Alternatively, you may simply copy the qbe binary. diff --git a/src/qbe/abi.c b/src/qbe/abi.c new file mode 100644 index 00000000..9c834972 --- /dev/null +++ b/src/qbe/abi.c @@ -0,0 +1,25 @@ +#include "all.h" + +/* eliminate sub-word abi op + * variants for targets that + * treat char/short/... 
as + * words with arbitrary high + * bits + */ +void +elimsb(Fn *fn) +{ + Blk *b; + Ins *i; + + for (b=fn->start; b; b=b->link) { + for (i=b->ins; i<&b->ins[b->nins]; i++) { + if (isargbh(i->op)) + i->op = Oarg; + if (isparbh(i->op)) + i->op = Opar; + } + if (isretbh(b->jmp.type)) + b->jmp.type = Jretw; + } +} diff --git a/src/qbe/alias.c b/src/qbe/alias.c new file mode 100644 index 00000000..9b410847 --- /dev/null +++ b/src/qbe/alias.c @@ -0,0 +1,222 @@ +#include "all.h" + +void +getalias(Alias *a, Ref r, Fn *fn) +{ + Con *c; + + switch (rtype(r)) { + default: + die("unreachable"); + case RTmp: + *a = fn->tmp[r.val].alias; + if (astack(a->type)) + a->type = a->slot->type; + assert(a->type != ABot); + break; + case RCon: + c = &fn->con[r.val]; + if (c->type == CAddr) { + a->type = ASym; + a->u.sym = c->sym; + } else + a->type = ACon; + a->offset = c->bits.i; + a->slot = 0; + break; + } +} + +int +alias(Ref p, int op, int sp, Ref q, int sq, int *delta, Fn *fn) +{ + Alias ap, aq; + int ovlap; + + getalias(&ap, p, fn); + getalias(&aq, q, fn); + ap.offset += op; + /* when delta is meaningful (ovlap == 1), + * we do not overflow int because sp and + * sq are bounded by 2^28 */ + *delta = ap.offset - aq.offset; + ovlap = ap.offset < aq.offset + sq && aq.offset < ap.offset + sp; + + if (astack(ap.type) && astack(aq.type)) { + /* if both are offsets of the same + * stack slot, they alias iif they + * overlap */ + if (ap.base == aq.base && ovlap) + return MustAlias; + return NoAlias; + } + + if (ap.type == ASym && aq.type == ASym) { + /* they conservatively alias if the + * symbols are different, or they + * alias for sure if they overlap */ + if (!symeq(ap.u.sym, aq.u.sym)) + return MayAlias; + if (ovlap) + return MustAlias; + return NoAlias; + } + + if ((ap.type == ACon && aq.type == ACon) + || (ap.type == aq.type && ap.base == aq.base)) { + assert(ap.type == ACon || ap.type == AUnk); + /* if they have the same base, we + * can rely on the offsets only */ + if (ovlap) + 
return MustAlias; + return NoAlias; + } + + /* if one of the two is unknown + * there may be aliasing unless + * the other is provably local */ + if (ap.type == AUnk && aq.type != ALoc) + return MayAlias; + if (aq.type == AUnk && ap.type != ALoc) + return MayAlias; + + return NoAlias; +} + +int +escapes(Ref r, Fn *fn) +{ + Alias *a; + + if (rtype(r) != RTmp) + return 1; + a = &fn->tmp[r.val].alias; + return !astack(a->type) || a->slot->type == AEsc; +} + +static void +esc(Ref r, Fn *fn) +{ + Alias *a; + + assert(rtype(r) <= RType); + if (rtype(r) == RTmp) { + a = &fn->tmp[r.val].alias; + if (astack(a->type)) + a->slot->type = AEsc; + } +} + +static void +store(Ref r, int sz, Fn *fn) +{ + Alias *a; + int64_t off; + bits m; + + if (rtype(r) == RTmp) { + a = &fn->tmp[r.val].alias; + if (a->slot) { + assert(astack(a->type)); + off = a->offset; + if (sz >= NBit + || (off < 0 || off >= NBit)) + m = -1; + else + m = (BIT(sz) - 1) << off; + a->slot->u.loc.m |= m; + } + } +} + +void +fillalias(Fn *fn) +{ + uint n; + int t, sz; + int64_t x; + Blk *b; + Phi *p; + Ins *i; + Con *c; + Alias *a, a0, a1; + + for (t=0; tntmp; t++) + fn->tmp[t].alias.type = ABot; + for (n=0; nnblk; ++n) { + b = fn->rpo[n]; + for (p=b->phi; p; p=p->link) { + assert(rtype(p->to) == RTmp); + a = &fn->tmp[p->to.val].alias; + assert(a->type == ABot); + a->type = AUnk; + a->base = p->to.val; + a->offset = 0; + a->slot = 0; + } + for (i=b->ins; i<&b->ins[b->nins]; ++i) { + a = 0; + if (!req(i->to, R)) { + assert(rtype(i->to) == RTmp); + a = &fn->tmp[i->to.val].alias; + assert(a->type == ABot); + if (Oalloc <= i->op && i->op <= Oalloc1) { + a->type = ALoc; + a->slot = a; + a->u.loc.sz = -1; + if (rtype(i->arg[0]) == RCon) { + c = &fn->con[i->arg[0].val]; + x = c->bits.i; + if (c->type == CBits) + if (0 <= x && x <= NBit) + a->u.loc.sz = x; + } + } else { + a->type = AUnk; + a->slot = 0; + } + a->base = i->to.val; + a->offset = 0; + } + if (i->op == Ocopy) { + assert(a); + getalias(a, i->arg[0], fn); + } + 
if (i->op == Oadd) { + getalias(&a0, i->arg[0], fn); + getalias(&a1, i->arg[1], fn); + if (a0.type == ACon) { + *a = a1; + a->offset += a0.offset; + } + else if (a1.type == ACon) { + *a = a0; + a->offset += a1.offset; + } + } + if (req(i->to, R) || a->type == AUnk) + if (i->op != Oblit0) { + if (!isload(i->op)) + esc(i->arg[0], fn); + if (!isstore(i->op)) + if (i->op != Oargc) + esc(i->arg[1], fn); + } + if (i->op == Oblit0) { + ++i; + assert(i->op == Oblit1); + assert(rtype(i->arg[0]) == RInt); + sz = abs(rsval(i->arg[0])); + store((i-1)->arg[1], sz, fn); + } + if (isstore(i->op)) + store(i->arg[1], storesz(i), fn); + } + if (b->jmp.type != Jretc) + esc(b->jmp.arg, fn); + } + for (b=fn->start; b; b=b->link) + for (p=b->phi; p; p=p->link) + for (n=0; nnarg; n++) + esc(p->arg[n], fn); +} diff --git a/src/qbe/all.h b/src/qbe/all.h new file mode 100644 index 00000000..9e1e633e --- /dev/null +++ b/src/qbe/all.h @@ -0,0 +1,631 @@ +#include +#include +#include +#include +#include +#include + +#define MAKESURE(what, x) typedef char make_sure_##what[(x)?1:-1] +#define die(...) 
die_(__FILE__, __VA_ARGS__) + +typedef unsigned char uchar; +typedef unsigned int uint; +typedef unsigned long ulong; +typedef unsigned long long bits; + +typedef struct BSet BSet; +typedef struct Ref Ref; +typedef struct Op Op; +typedef struct Ins Ins; +typedef struct Phi Phi; +typedef struct Blk Blk; +typedef struct Use Use; +typedef struct Sym Sym; +typedef struct Num Num; +typedef struct Alias Alias; +typedef struct Tmp Tmp; +typedef struct Con Con; +typedef struct Addr Mem; +typedef struct Fn Fn; +typedef struct Typ Typ; +typedef struct Field Field; +typedef struct Dat Dat; +typedef struct Lnk Lnk; +typedef struct Target Target; + +enum { + NString = 80, + NIns = 1 << 20, + NAlign = 3, + NField = 32, + NBit = CHAR_BIT * sizeof(bits), +}; + +struct Target { + char name[16]; + char apple; + char windows; + int gpr0; /* first general purpose reg */ + int ngpr; + int fpr0; /* first floating point reg */ + int nfpr; + bits rglob; /* globally live regs (e.g., sp, fp) */ + int nrglob; + int *rsave; /* caller-save */ + int nrsave[2]; + bits (*retregs)(Ref, int[2]); + bits (*argregs)(Ref, int[2]); + int (*memargs)(int); + void (*abi0)(Fn *); + void (*abi1)(Fn *); + void (*isel)(Fn *); + void (*emitfn)(Fn *, FILE *); + void (*emitfin)(FILE *); + char asloc[4]; + char assym[4]; + uint cansel:1; +}; + +#define BIT(n) ((bits)1 << (n)) + +enum { + RXX = 0, + Tmp0 = NBit, /* first non-reg temporary */ +}; + +struct BSet { + uint nt; + bits *t; +}; + +struct Ref { + uint type:3; + uint val:29; +}; + +enum { + RTmp, + RCon, + RInt, + RType, /* last kind to come out of the parser */ + RSlot, + RCall, + RMem, +}; + +#define R (Ref){RTmp, 0} +#define UNDEF (Ref){RCon, 0} /* represents uninitialized data */ +#define CON_Z (Ref){RCon, 1} +#define TMP(x) (Ref){RTmp, x} +#define CON(x) (Ref){RCon, x} +#define SLOT(x) (Ref){RSlot, (x)&0x1fffffff} +#define TYPE(x) (Ref){RType, x} +#define CALL(x) (Ref){RCall, x} +#define MEM(x) (Ref){RMem, x} +#define INT(x) (Ref){RInt, (x)&0x1fffffff} 
+ +static inline int req(Ref a, Ref b) +{ + return a.type == b.type && a.val == b.val; +} + +static inline int rtype(Ref r) +{ + if (req(r, R)) + return -1; + return r.type; +} + +static inline int rsval(Ref r) +{ + return ((int)r.val ^ 0x10000000) - 0x10000000; +} + +enum CmpI { + Cieq, + Cine, + Cisge, + Cisgt, + Cisle, + Cislt, + Ciuge, + Ciugt, + Ciule, + Ciult, + NCmpI, +}; + +enum CmpF { + Cfeq, + Cfge, + Cfgt, + Cfle, + Cflt, + Cfne, + Cfo, + Cfuo, + NCmpF, + NCmp = NCmpI + NCmpF, +}; + +enum O { + Oxxx, +#define O(op, x, y) O##op, + #include "ops.h" + NOp, +}; + +enum J { + Jxxx, +#define JMPS(X) \ + X(retw) X(retl) X(rets) X(retd) \ + X(retsb) X(retub) X(retsh) X(retuh) \ + X(retc) X(ret0) X(jmp) X(jnz) \ + X(jfieq) X(jfine) X(jfisge) X(jfisgt) \ + X(jfisle) X(jfislt) X(jfiuge) X(jfiugt) \ + X(jfiule) X(jfiult) X(jffeq) X(jffge) \ + X(jffgt) X(jffle) X(jfflt) X(jffne) \ + X(jffo) X(jffuo) X(hlt) +#define X(j) J##j, + JMPS(X) +#undef X + NJmp +}; + +enum { + Ocmpw = Oceqw, + Ocmpw1 = Ocultw, + Ocmpl = Oceql, + Ocmpl1 = Ocultl, + Ocmps = Oceqs, + Ocmps1 = Ocuos, + Ocmpd = Oceqd, + Ocmpd1 = Ocuod, + Oalloc = Oalloc4, + Oalloc1 = Oalloc16, + Oflag = Oflagieq, + Oflag1 = Oflagfuo, + Oxsel = Oxselieq, + Oxsel1 = Oxselfuo, + NPubOp = Onop, + Jjf = Jjfieq, + Jjf1 = Jjffuo, +}; + +#define INRANGE(x, l, u) ((unsigned)(x) - l <= u - l) /* linear in x */ +#define isstore(o) INRANGE(o, Ostoreb, Ostored) +#define isload(o) INRANGE(o, Oloadsb, Oload) +#define isalloc(o) INRANGE(o, Oalloc4, Oalloc16) +#define isext(o) INRANGE(o, Oextsb, Oextuw) +#define ispar(o) INRANGE(o, Opar, Opare) +#define isarg(o) INRANGE(o, Oarg, Oargv) +#define isret(j) INRANGE(j, Jretw, Jret0) +#define isparbh(o) INRANGE(o, Oparsb, Oparuh) +#define isargbh(o) INRANGE(o, Oargsb, Oarguh) +#define isretbh(j) INRANGE(j, Jretsb, Jretuh) +#define isxsel(o) INRANGE(o, Oxsel, Oxsel1) + +enum { + Kx = -1, /* "top" class (see usecheck() and clsmerge()) */ + Kw, + Kl, + Ks, + Kd +}; + +#define KWIDE(k) 
((k)&1) +#define KBASE(k) ((k)>>1) + +struct Op { + char *name; + short argcls[2][4]; + uint canfold:1; + uint hasid:1; /* op identity value? */ + uint idval:1; /* identity value 0/1 */ + uint commutes:1; /* commutative op? */ + uint assoc:1; /* associative op? */ + uint idemp:1; /* idempotent op? */ + uint cmpeqwl:1; /* Kl/Kw cmp eq/ne? */ + uint cmplgtewl:1; /* Kl/Kw cmp lt/gt/le/ge? */ + uint eqval:1; /* 1 for eq; 0 for ne */ + uint pinned:1; /* GCM pinned op? */ +}; + +struct Ins { + uint op:30; + uint cls:2; + Ref to; + Ref arg[2]; +}; + +struct Phi { + Ref to; + Ref *arg; + Blk **blk; + uint narg; + short cls; + uint visit:1; + Phi *link; +}; + +struct Blk { + Phi *phi; + Ins *ins; + uint nins; + struct { + short type; + Ref arg; + } jmp; + Blk *s1; + Blk *s2; + Blk *link; + + uint id; + uint visit; + + Blk *idom; + Blk *dom, *dlink; + Blk **fron; + uint nfron; + int depth; + + Blk **pred; + uint npred; + BSet in[1], out[1], gen[1]; + int nlive[2]; + int loop; + char name[NString]; +}; + +struct Use { + enum { + UXXX, + UPhi, + UIns, + UJmp, + } type; + uint bid; + union { + Ins *ins; + Phi *phi; + } u; +}; + +struct Sym { + enum { + SGlo, + SThr, + } type; + uint32_t id; +}; + +struct Num { + uchar n; + uchar nl, nr; + Ref l, r; +}; + +enum { + NoAlias, + MayAlias, + MustAlias +}; + +struct Alias { + enum { + ABot = 0, + ALoc = 1, /* stack local */ + ACon = 2, + AEsc = 3, /* stack escaping */ + ASym = 4, + AUnk = 6, + #define astack(t) ((t) & 1) + } type; + int base; + int64_t offset; + union { + Sym sym; + struct { + int sz; /* -1 if > NBit */ + bits m; + } loc; + } u; + Alias *slot; +}; + +struct Tmp { + char name[NString]; + Ins *def; + Use *use; + uint ndef, nuse; + uint bid; /* id of a defining block */ + uint cost; + int slot; /* -1 for unset */ + short cls; + struct { + int r; /* register or -1 */ + int w; /* weight */ + bits m; /* avoid these registers */ + } hint; + int phi; + Alias alias; + enum { + WFull, + Wsb, /* must match Oload/Oext order */ + 
Wub, + Wsh, + Wuh, + Wsw, + Wuw + } width; + int visit; + uint gcmbid; +}; + +struct Con { + enum { + CUndef, + CBits, + CAddr, + } type; + Sym sym; + union { + int64_t i; + double d; + float s; + } bits; + char flt; /* 1 to print as s, 2 to print as d */ +}; + +typedef struct Addr Addr; + +struct Addr { /* amd64 addressing */ + Con offset; + Ref base; + Ref index; + int scale; +}; + +struct Lnk { + char export; + char thread; + char common; + char align; + char *sec; + char *secf; +}; + +struct Fn { + Blk *start; + Tmp *tmp; + Con *con; + Mem *mem; + int ntmp; + int ncon; + int nmem; + uint nblk; + int retty; /* index in typ[], -1 if no aggregate return */ + Ref retr; + Blk **rpo; + bits reg; + int slot; + int salign; + char vararg; + char dynalloc; + char leaf; + char name[NString]; + Lnk lnk; +}; + +struct Typ { + char name[NString]; + char isdark; + char isunion; + int align; + uint64_t size; + uint nunion; + struct Field { + enum { + FEnd, + Fb, + Fh, + Fw, + Fl, + Fs, + Fd, + FPad, + FTyp, + } type; + uint len; /* or index in typ[] for FTyp */ + } (*fields)[NField+1]; +}; + +struct Dat { + enum { + DStart, + DEnd, + DB, + DH, + DW, + DL, + DZ + } type; + char *name; + Lnk *lnk; + union { + int64_t num; + double fltd; + float flts; + char *str; + struct { + char *name; + int64_t off; + } ref; + } u; + char isref; + char isstr; +}; + +/* main.c */ +extern Target T; +extern char debug['Z'+1]; + +/* util.c */ +typedef enum { + PHeap, /* free() necessary */ + PFn, /* discarded after processing the function */ +} Pool; + +extern Typ *typ; +extern Ins insb[NIns], *curi; +uint32_t hash(char *); +void die_(char *, char *, ...) 
__attribute__((noreturn)); +void *emalloc(size_t); +void *alloc(size_t); +void freeall(void); +void *vnew(ulong, size_t, Pool); +void vfree(void *); +void vgrow(void *, ulong); +void addins(Ins **, uint *, Ins *); +void addbins(Ins **, uint *, Blk *); +void strf(char[NString], char *, ...); +uint32_t intern(char *); +char *str(uint32_t); +int argcls(Ins *, int); +int isreg(Ref); +int iscmp(int, int *, int *); +void igroup(Blk *, Ins *, Ins **, Ins **); +void emit(int, int, Ref, Ref, Ref); +void emiti(Ins); +void idup(Blk *, Ins *, ulong); +Ins *icpy(Ins *, Ins *, ulong); +int cmpop(int); +int cmpneg(int); +int cmpwlneg(int); +int clsmerge(short *, short); +int phicls(int, Tmp *); +uint phiargn(Phi *, Blk *); +Ref phiarg(Phi *, Blk *); +Ref newtmp(char *, int, Fn *); +void chuse(Ref, int, Fn *); +int symeq(Sym, Sym); +Ref newcon(Con *, Fn *); +Ref getcon(int64_t, Fn *); +int addcon(Con *, Con *, int); +int isconbits(Fn *fn, Ref r, int64_t *v); +void salloc(Ref, Ref, Fn *); +void dumpts(BSet *, Tmp *, FILE *); +void runmatch(uchar *, Num *, Ref, Ref *); +void bsinit(BSet *, uint); +void bszero(BSet *); +uint bscount(BSet *); +void bsset(BSet *, uint); +void bsclr(BSet *, uint); +void bscopy(BSet *, BSet *); +void bsunion(BSet *, BSet *); +void bsinter(BSet *, BSet *); +void bsdiff(BSet *, BSet *); +int bsequal(BSet *, BSet *); +int bsiter(BSet *, int *); + +static inline int +bshas(BSet *bs, uint elt) +{ + assert(elt < bs->nt * NBit); + return (bs->t[elt/NBit] & BIT(elt%NBit)) != 0; +} + +/* parse.c */ +extern Op optab[NOp]; +void parse(FILE *, char *, void (char *), void (Dat *), void (Fn *)); +void printfn(Fn *, FILE *); +void printref(Ref, Fn *, FILE *); +void err(char *, ...) 
__attribute__((noreturn)); + +/* abi.c */ +void elimsb(Fn *); + +/* cfg.c */ +Blk *newblk(void); +void fillpreds(Fn *); +void fillcfg(Fn *); +void filldom(Fn *); +int sdom(Blk *, Blk *); +int dom(Blk *, Blk *); +void fillfron(Fn *); +void loopiter(Fn *, void (*)(Blk *, Blk *)); +void filldepth(Fn *); +Blk *lca(Blk *, Blk *); +void fillloop(Fn *); +void simpljmp(Fn *); +int reaches(Fn *, Blk *, Blk *); +int reachesnotvia(Fn *, Blk *, Blk *, Blk *); +int ifgraph(Blk *, Blk **, Blk **, Blk **); +void simplcfg(Fn *); + +/* mem.c */ +void promote(Fn *); +void coalesce(Fn *); + +/* alias.c */ +void fillalias(Fn *); +void getalias(Alias *, Ref, Fn *); +int alias(Ref, int, int, Ref, int, int *, Fn *); +int escapes(Ref, Fn *); + +/* load.c */ +int loadsz(Ins *); +int storesz(Ins *); +void loadopt(Fn *); + +/* ssa.c */ +void adduse(Tmp *, int, Blk *, ...); +void filluse(Fn *); +void ssa(Fn *); +void ssacheck(Fn *); + +/* copy.c */ +void narrowpars(Fn *fn); +Ref copyref(Fn *, Blk *, Ins *); +Ref phicopyref(Fn *, Blk *, Phi *); + +/* fold.c */ +int foldint(Con *, int, int, Con *, Con *); +Ref foldref(Fn *, Ins *); + +/* gvn.c */ +extern Ref con01[2]; /* 0 and 1 */ +int zeroval(Fn *, Blk *, Ref, int, int *); +void gvn(Fn *); + +/* gcm.c */ +int pinned(Ins *); +void gcm(Fn *); + +/* ifopt.c */ +void ifconvert(Fn *fn); + +/* simpl.c */ +void simpl(Fn *); + +/* live.c */ +void liveon(BSet *, Blk *, Blk *); +void filllive(Fn *); + +/* spill.c */ +void fillcost(Fn *); +void spill(Fn *); + +/* rega.c */ +void rega(Fn *); + +/* emit.c */ +void emitfnlnk(char *, Lnk *, FILE *); +void emitdat(Dat *, FILE *); +void emitdbgfile(char *, FILE *); +void emitdbgloc(uint, uint, FILE *); +int stashbits(bits, int); +void elf_emitfnfin(char *, FILE *); +void elf_emitfin(FILE *); +void macho_emitfin(FILE *); +void pe_emitfin(FILE *); diff --git a/src/qbe/amd64/all.h b/src/qbe/amd64/all.h new file mode 100644 index 00000000..8946dbb3 --- /dev/null +++ b/src/qbe/amd64/all.h @@ -0,0 +1,82 @@ 
+#include "../all.h" + +typedef struct Amd64Op Amd64Op; + +enum Amd64Reg { + RAX = RXX+1, /* caller-save */ + RCX, /* caller-save */ + RDX, /* caller-save */ + RSI, /* caller-save on sysv, callee-save on win */ + RDI, /* caller-save on sysv, callee-save on win */ + R8, /* caller-save */ + R9, /* caller-save */ + R10, /* caller-save */ + R11, /* caller-save */ + + RBX, /* callee-save */ + R12, + R13, + R14, + R15, + + RBP, /* globally live */ + RSP, + + XMM0, /* sse */ + XMM1, + XMM2, + XMM3, + XMM4, + XMM5, + XMM6, + XMM7, + XMM8, + XMM9, + XMM10, + XMM11, + XMM12, + XMM13, + XMM14, + XMM15, + + NFPR = XMM14 - XMM0 + 1, /* reserve XMM15 */ + NGPR = RSP - RAX + 1, + NFPS = NFPR, + + NGPS_SYSV = R11 - RAX + 1, + NCLR_SYSV = R15 - RBX + 1, + + NGPS_WIN = R11 - RAX + 1 - 2, /* -2 for RDI/RDI */ + NCLR_WIN = R15 - RBX + 1 + 2, /* +2 for RDI/RDI */ +}; +MAKESURE(reg_not_tmp, XMM15 < (int)Tmp0); + +struct Amd64Op { + char nmem; + char zflag; + char lflag; +}; + +/* targ.c */ +extern Amd64Op amd64_op[]; + +/* sysv.c (abi) */ +extern int amd64_sysv_rsave[]; +extern int amd64_sysv_rclob[]; +bits amd64_sysv_retregs(Ref, int[2]); +bits amd64_sysv_argregs(Ref, int[2]); +void amd64_sysv_abi(Fn *); + +/* winabi.c */ +extern int amd64_winabi_rsave[]; +extern int amd64_winabi_rclob[]; +bits amd64_winabi_retregs(Ref, int[2]); +bits amd64_winabi_argregs(Ref, int[2]); +void amd64_winabi_abi(Fn *); + +/* isel.c */ +void amd64_isel(Fn *); + +/* emit.c */ +void amd64_sysv_emitfn(Fn *, FILE *); +void amd64_winabi_emitfn(Fn *, FILE *); diff --git a/src/qbe/amd64/emit.c b/src/qbe/amd64/emit.c new file mode 100644 index 00000000..4a3bb994 --- /dev/null +++ b/src/qbe/amd64/emit.c @@ -0,0 +1,844 @@ +#include "all.h" + + +typedef struct E E; + +struct E { + FILE *f; + Fn *fn; + int fp; + uint64_t fsz; + int nclob; +}; + +#define CMP(X) \ + X(Ciule, "be", "a") \ + X(Ciult, "b", "ae") \ + X(Cisle, "le", "g") \ + X(Cislt, "l", "ge") \ + X(Cisgt, "g", "le") \ + X(Cisge, "ge", "l") \ + X(Ciugt, "a", 
"be") \ + X(Ciuge, "ae", "b") \ + X(Cieq, "z", "nz") \ + X(Cine, "nz", "z") \ + X(NCmpI+Cfle, "be", "a") \ + X(NCmpI+Cflt, "b", "ae") \ + X(NCmpI+Cfgt, "a", "be") \ + X(NCmpI+Cfge, "ae", "b") \ + X(NCmpI+Cfo, "np", "p") \ + X(NCmpI+Cfuo, "p", "np") + +enum { + SLong = 0, + SWord = 1, + SShort = 2, + SByte = 3, + + Ki = -1, /* matches Kw and Kl */ + Ka = -2, /* matches all classes */ +}; + +/* Instruction format strings: + * + * if the format string starts with -, the instruction + * is assumed to be 3-address and is put in 2-address + * mode using an extra mov if necessary + * + * if the format string starts with +, the same as the + * above applies, but commutativity is also assumed + * + * %k is used to set the class of the instruction, + * it'll expand to "l", "q", "ss", "sd", depending + * on the instruction class + * %0 designates the first argument + * %1 designates the second argument + * %= designates the result + * + * if %k is not used, a prefix to 0, 1, or = must be + * added, it can be: + * M - memory reference + * L - long (64 bits) + * W - word (32 bits) + * H - short (16 bits) + * B - byte (8 bits) + * S - single precision float + * D - double precision float + */ +static struct { + short op; + short cls; + char *fmt; +} omap[] = { + { Oadd, Ka, "+add%k %1, %=" }, + { Osub, Ka, "-sub%k %1, %=" }, + { Oand, Ki, "+and%k %1, %=" }, + { Oor, Ki, "+or%k %1, %=" }, + { Oxor, Ki, "+xor%k %1, %=" }, + { Osar, Ki, "-sar%k %B1, %=" }, + { Oshr, Ki, "-shr%k %B1, %=" }, + { Oshl, Ki, "-shl%k %B1, %=" }, + { Omul, Ki, "+imul%k %1, %=" }, + { Omul, Ks, "+mulss %1, %=" }, + { Omul, Kd, "+mulsd %1, %=" }, + { Odiv, Ka, "-div%k %1, %=" }, + { Ostorel, Ka, "movq %L0, %M1" }, + { Ostorew, Ka, "movl %W0, %M1" }, + { Ostoreh, Ka, "movw %H0, %M1" }, + { Ostoreb, Ka, "movb %B0, %M1" }, + { Ostores, Ka, "movss %S0, %M1" }, + { Ostored, Ka, "movsd %D0, %M1" }, + { Oload, Ka, "mov%k %M0, %=" }, + { Oloadsw, Kl, "movslq %M0, %L=" }, + { Oloadsw, Kw, "movl %M0, %W=" }, + { 
Oloaduw, Ki, "movl %M0, %W=" }, + { Oloadsh, Ki, "movsw%k %M0, %=" }, + { Oloaduh, Ki, "movzw%k %M0, %=" }, + { Oloadsb, Ki, "movsb%k %M0, %=" }, + { Oloadub, Ki, "movzb%k %M0, %=" }, + { Oextsw, Kl, "movslq %W0, %L=" }, + { Oextuw, Kl, "movl %W0, %W=" }, + { Oextsh, Ki, "movsw%k %H0, %=" }, + { Oextuh, Ki, "movzw%k %H0, %=" }, + { Oextsb, Ki, "movsb%k %B0, %=" }, + { Oextub, Ki, "movzb%k %B0, %=" }, + + { Oexts, Kd, "cvtss2sd %0, %=" }, + { Otruncd, Ks, "cvtsd2ss %0, %=" }, + { Ostosi, Ki, "cvttss2si%k %0, %=" }, + { Odtosi, Ki, "cvttsd2si%k %0, %=" }, + { Oswtof, Ka, "cvtsi2%k %W0, %=" }, + { Osltof, Ka, "cvtsi2%k %L0, %=" }, + { Ocast, Ki, "movq %D0, %L=" }, + { Ocast, Ka, "movq %L0, %D=" }, + + { Oaddr, Ki, "lea%k %M0, %=" }, + { Oswap, Ki, "xchg%k %0, %1" }, + { Osign, Kl, "cqto" }, + { Osign, Kw, "cltd" }, + { Oxdiv, Ki, "div%k %0" }, + { Oxidiv, Ki, "idiv%k %0" }, + { Oxcmp, Ks, "ucomiss %S0, %S1" }, + { Oxcmp, Kd, "ucomisd %D0, %D1" }, + { Oxcmp, Ki, "cmp%k %0, %1" }, + { Oxtest, Ki, "test%k %0, %1" }, +#define X(c, s, _) \ + { Oflag+c, Ki, "set" s " %B=\n\tmovzb%k %B=, %=" }, + CMP(X) +#undef X + { Oflagfeq, Ki, "setz %B=\n\tmovzb%k %B=, %=" }, + { Oflagfne, Ki, "setnz %B=\n\tmovzb%k %B=, %=" }, + { NOp, 0, 0 } +}; + +static char cmov[][2][16] = { +#define X(c, s0, s1) \ + [c] = { \ + "cmov" s0 " %0, %=", \ + "cmov" s1 " %1, %=", \ + }, + CMP(X) +#undef X +}; + +static char *rname[][4] = { + [RAX] = {"rax", "eax", "ax", "al"}, + [RBX] = {"rbx", "ebx", "bx", "bl"}, + [RCX] = {"rcx", "ecx", "cx", "cl"}, + [RDX] = {"rdx", "edx", "dx", "dl"}, + [RSI] = {"rsi", "esi", "si", "sil"}, + [RDI] = {"rdi", "edi", "di", "dil"}, + [RBP] = {"rbp", "ebp", "bp", "bpl"}, + [RSP] = {"rsp", "esp", "sp", "spl"}, + [R8 ] = {"r8" , "r8d", "r8w", "r8b"}, + [R9 ] = {"r9" , "r9d", "r9w", "r9b"}, + [R10] = {"r10", "r10d", "r10w", "r10b"}, + [R11] = {"r11", "r11d", "r11w", "r11b"}, + [R12] = {"r12", "r12d", "r12w", "r12b"}, + [R13] = {"r13", "r13d", "r13w", "r13b"}, + [R14] = {"r14", 
"r14d", "r14w", "r14b"}, + [R15] = {"r15", "r15d", "r15w", "r15b"}, +}; + + +static int +slot(Ref r, E *e) +{ + int s; + + s = rsval(r); + assert(s <= e->fn->slot); + /* specific to NAlign == 3 */ + if (s < 0) { + if (e->fp == RSP) + return 4*-s - 8 + e->fsz + e->nclob*8; + else + return 4*-s; + } + else if (e->fp == RSP) + return 4*s + e->nclob*8; + else if (e->fn->vararg) { + if (T.windows) + return -4 * (e->fn->slot - s); + else + return -176 + -4 * (e->fn->slot - s); + } else + return -4 * (e->fn->slot - s); +} + +static void +emitcon(Con *con, E *e) +{ + char *p, *l; + + switch (con->type) { + case CAddr: + l = str(con->sym.id); + p = l[0] == '"' ? "" : T.assym; + if (con->sym.type == SThr) { + if (T.apple) + fprintf(e->f, "%s%s@TLVP", p, l); + else + fprintf(e->f, "%%fs:%s%s@tpoff", p, l); + } else + fprintf(e->f, "%s%s", p, l); + if (con->bits.i) + fprintf(e->f, "%+"PRId64, con->bits.i); + break; + case CBits: + fprintf(e->f, "%"PRId64, con->bits.i); + break; + default: + die("unreachable"); + } +} + +static char * +regtoa(int reg, int sz) +{ + static char buf[6]; + + assert(reg <= XMM15); + if (reg >= XMM0) { + sprintf(buf, "xmm%d", reg-XMM0); + return buf; + } else + return rname[reg][sz]; +} + +static Ref +getarg(char c, Ins *i) +{ + switch (c) { + case '0': + return i->arg[0]; + case '1': + return i->arg[1]; + case '=': + return i->to; + default: + die("invalid arg letter %c", c); + } +} + +static void emitins(Ins, E *); + +static void +emitcopy(Ref r1, Ref r2, int k, E *e) +{ + Ins icp; + + icp.op = Ocopy; + icp.arg[0] = r2; + icp.to = r1; + icp.cls = k; + emitins(icp, e); +} + +static void +emitf(char *s, Ins *i, E *e) +{ + static char clstoa[][3] = {"l", "q", "ss", "sd"}; + char c; + int sz; + Ref ref; + Mem *m; + Con off; + + switch (*s) { + case '+': + if (req(i->arg[1], i->to)) { + ref = i->arg[0]; + i->arg[0] = i->arg[1]; + i->arg[1] = ref; + } + /* fall through */ + case '-': + assert((!req(i->arg[1], i->to) || req(i->arg[0], i->to)) && + "cannot 
convert to 2-address"); + emitcopy(i->to, i->arg[0], i->cls, e); + s++; + break; + } + + fputc('\t', e->f); +Next: + while ((c = *s++) != '%') + if (!c) { + fputc('\n', e->f); + return; + } else + fputc(c, e->f); + switch ((c = *s++)) { + case '%': + fputc('%', e->f); + break; + case 'k': + fputs(clstoa[i->cls], e->f); + break; + case '0': + case '1': + case '=': + sz = KWIDE(i->cls) ? SLong : SWord; + s--; + goto Ref; + case 'D': + case 'S': + sz = SLong; /* does not matter for floats */ + Ref: + c = *s++; + ref = getarg(c, i); + switch (rtype(ref)) { + case RTmp: + assert(isreg(ref)); + fprintf(e->f, "%%%s", regtoa(ref.val, sz)); + break; + case RSlot: + fprintf(e->f, "%d(%%%s)", + slot(ref, e), + regtoa(e->fp, SLong) + ); + break; + case RMem: + Mem: + m = &e->fn->mem[ref.val]; + if (rtype(m->base) == RSlot) { + off.type = CBits; + off.bits.i = slot(m->base, e); + addcon(&m->offset, &off, 1); + m->base = TMP(e->fp); + } + if (m->offset.type != CUndef) + emitcon(&m->offset, e); + fputc('(', e->f); + if (!req(m->base, R)) + fprintf(e->f, "%%%s", + regtoa(m->base.val, SLong) + ); + else if (m->offset.type == CAddr) + fprintf(e->f, "%%rip"); + if (!req(m->index, R)) + fprintf(e->f, ", %%%s, %d", + regtoa(m->index.val, SLong), + m->scale + ); + fputc(')', e->f); + break; + case RCon: + fputc('$', e->f); + emitcon(&e->fn->con[ref.val], e); + break; + default: + die("unreachable"); + } + break; + case 'L': + sz = SLong; + goto Ref; + case 'W': + sz = SWord; + goto Ref; + case 'H': + sz = SShort; + goto Ref; + case 'B': + sz = SByte; + goto Ref; + case 'M': + c = *s++; + ref = getarg(c, i); + switch (rtype(ref)) { + case RMem: + goto Mem; + case RSlot: + fprintf(e->f, "%d(%%%s)", + slot(ref, e), + regtoa(e->fp, SLong) + ); + break; + case RCon: + off = e->fn->con[ref.val]; + emitcon(&off, e); + if (off.type == CAddr) + if (off.sym.type != SThr || T.apple) + fprintf(e->f, "(%%rip)"); + break; + case RTmp: + assert(isreg(ref)); + fprintf(e->f, "(%%%s)", regtoa(ref.val, 
SLong)); + break; + default: + die("unreachable"); + } + break; + default: + die("invalid format specifier %%%c", c); + } + goto Next; +} + +static bits negmask[4] = { + [Ks] = 0x80000000, + [Kd] = 0x8000000000000000, +}; + +static void +emitins(Ins i, E *e) +{ + Ref r; + int64_t val; + int o, t0; + Ins ineg; + Con *con; + char *sym; + + switch (i.op) { + default: + if (isxsel(i.op)) + goto case_Oxsel; + Table: + /* most instructions are just pulled out of + * the table omap[], some special cases are + * detailed below */ + for (o=0;; o++) { + /* this linear search should really be a binary + * search */ + if (omap[o].op == NOp) + die("no match for %s(%c)", + optab[i.op].name, "wlsd"[i.cls]); + if (omap[o].op == i.op) + if (omap[o].cls == i.cls + || (omap[o].cls == Ki && KBASE(i.cls) == 0) + || (omap[o].cls == Ka)) + break; + } + emitf(omap[o].fmt, &i, e); + break; + case Onop: + /* just do nothing for nops, they are inserted + * by some passes */ + break; + case Omul: + /* here, we try to use the 3-addresss form + * of multiplication when possible */ + if (rtype(i.arg[1]) == RCon) { + r = i.arg[0]; + i.arg[0] = i.arg[1]; + i.arg[1] = r; + } + if (KBASE(i.cls) == 0 /* only available for ints */ + && rtype(i.arg[0]) == RCon + && rtype(i.arg[1]) == RTmp) { + emitf("imul%k %0, %1, %=", &i, e); + break; + } + goto Table; + case Osub: + /* we have to use the negation trick to handle + * some 3-address subtractions */ + if (req(i.to, i.arg[1]) && !req(i.arg[0], i.to)) { + ineg = (Ins){Oneg, i.cls, i.to, {i.to}}; + emitins(ineg, e); + emitf("add%k %0, %=", &i, e); + break; + } + goto Table; + case Oneg: + if (!req(i.to, i.arg[0])) + emitf("mov%k %0, %=", &i, e); + if (KBASE(i.cls) == 0) + emitf("neg%k %=", &i, e); + else + fprintf(e->f, + "\txorp%c %sfp%d(%%rip), %%%s\n", + "xxsd"[i.cls], + T.asloc, + stashbits(negmask[i.cls], 16), + regtoa(i.to.val, SLong) + ); + break; + case Odiv: + /* use xmm15 to adjust the instruction when the + * conversion to 2-address in emitf() 
would fail */ + if (req(i.to, i.arg[1])) { + i.arg[1] = TMP(XMM0+15); + emitf("mov%k %=, %1", &i, e); + emitf("mov%k %0, %=", &i, e); + i.arg[0] = i.to; + } + goto Table; + case Ocopy: + /* copies are used for many things; see my note + * to understand how to load big constants: + * https://c9x.me/notes/2015-09-19.html */ + assert(rtype(i.to) != RMem); + if (req(i.to, R) || req(i.arg[0], R)) + break; + if (req(i.to, i.arg[0])) + break; + t0 = rtype(i.arg[0]); + if (i.cls == Kl + && t0 == RCon + && e->fn->con[i.arg[0].val].type == CBits) { + val = e->fn->con[i.arg[0].val].bits.i; + if (isreg(i.to)) + if (val >= 0 && val <= UINT32_MAX) { + emitf("movl %W0, %W=", &i, e); + break; + } + if (rtype(i.to) == RSlot) + if (val < INT32_MIN || val > INT32_MAX) { + emitf("movl %0, %=", &i, e); + emitf("movl %0>>32, 4+%=", &i, e); + break; + } + } + if (isreg(i.to) + && t0 == RCon + && e->fn->con[i.arg[0].val].type == CAddr) { + emitf("lea%k %M0, %=", &i, e); + break; + } + if (rtype(i.to) == RSlot + && (t0 == RSlot || t0 == RMem)) { + i.cls = KWIDE(i.cls) ? Kd : Ks; + i.arg[1] = TMP(XMM0+15); + emitf("mov%k %0, %1", &i, e); + emitf("mov%k %1, %=", &i, e); + break; + } + /* conveniently, the assembler knows if it + * should use movabsq when reading movq */ + emitf("mov%k %0, %=", &i, e); + break; + case Oaddr: + if (!T.apple + && rtype(i.arg[0]) == RCon + && e->fn->con[i.arg[0].val].sym.type == SThr) { + /* derive the symbol address from the TCB + * address at offset 0 of %fs */ + assert(isreg(i.to)); + con = &e->fn->con[i.arg[0].val]; + sym = str(con->sym.id); + emitf("movq %%fs:0, %L=", &i, e); + fprintf(e->f, "\tleaq %s%s@tpoff", + sym[0] == '"' ? "" : T.assym, sym); + if (con->bits.i) + fprintf(e->f, "%+"PRId64, + con->bits.i); + fprintf(e->f, "(%%%s), %%%s\n", + regtoa(i.to.val, SLong), + regtoa(i.to.val, SLong)); + break; + } + goto Table; + case Ocall: + /* calls simply have a weird syntax in AT&T + * assembly... 
*/ + switch (rtype(i.arg[0])) { + case RCon: + fprintf(e->f, "\tcallq "); + emitcon(&e->fn->con[i.arg[0].val], e); + fprintf(e->f, "\n"); + break; + case RTmp: + emitf("callq *%L0", &i, e); + break; + default: + die("invalid call argument"); + } + break; + case Osalloc: + /* there is no good reason why this is here + * maybe we should split Osalloc in 2 different + * instructions depending on the result + */ + assert(e->fp == RBP); + emitf("subq %L0, %%rsp", &i, e); + if (!req(i.to, R)) + emitcopy(i.to, TMP(RSP), Kl, e); + break; + case Oswap: + if (KBASE(i.cls) == 0) + goto Table; + /* for floats, there is no swap instruction + * so we use xmm15 as a temporary + */ + emitcopy(TMP(XMM0+15), i.arg[0], i.cls, e); + emitcopy(i.arg[0], i.arg[1], i.cls, e); + emitcopy(i.arg[1], TMP(XMM0+15), i.cls, e); + break; + case Odbgloc: + emitdbgloc(i.arg[0].val, i.arg[1].val, e->f); + break; + case_Oxsel: + if (req(i.to, i.arg[1])) + emitf(cmov[i.op-Oxsel][0], &i, e); + else { + if (!req(i.to, i.arg[0])) + emitf("mov %0, %=", &i, e); + emitf(cmov[i.op-Oxsel][1], &i, e); + } + break; + } +} + +static void +sysv_framesz(E *e) +{ + uint64_t i, o, f; + + /* specific to NAlign == 3 */ + o = 0; + if (!e->fn->leaf) { + for (i=0, o=0; ifn->reg >> amd64_sysv_rclob[i]; + o &= 1; + } + f = e->fn->slot; + f = (f + 3) & -4; + if (f > 0 + && e->fp == RSP + && e->fn->salign == 4) + f += 2; + e->fsz = 4*f + 8*o + 176*e->fn->vararg; +} + +void +amd64_sysv_emitfn(Fn *fn, FILE *f) +{ + static char *ctoa[] = { + #define X(c, s, _) [c] = s, + CMP(X) + #undef X + }; + static int id0; + Blk *b, *s; + Ins *i, itmp; + int *r, c, o, n, lbl; + uint p; + E *e; + + e = &(E){.f = f, .fn = fn}; + emitfnlnk(fn->name, &fn->lnk, f); + fputs("\tendbr64\n", f); + if (!fn->leaf || fn->vararg || fn->dynalloc) { + e->fp = RBP; + fputs("\tpushq %rbp\n\tmovq %rsp, %rbp\n", f); + } else + e->fp = RSP; + sysv_framesz(e); + if (e->fsz) + fprintf(f, "\tsubq $%"PRIu64", %%rsp\n", e->fsz); + if (fn->vararg) { + o = -176; + 
for (r=amd64_sysv_rsave; r<&amd64_sysv_rsave[6]; r++, o+=8) + fprintf(f, "\tmovq %%%s, %d(%%rbp)\n", rname[*r][0], o); + for (n=0; n<8; ++n, o+=16) + fprintf(f, "\tmovaps %%xmm%d, %d(%%rbp)\n", n, o); + } + for (r=amd64_sysv_rclob; r<&amd64_sysv_rclob[NCLR_SYSV]; r++) + if (fn->reg & BIT(*r)) { + itmp.arg[0] = TMP(*r); + emitf("pushq %L0", &itmp, e); + e->nclob++; + } + + for (lbl=0, b=fn->start; b; b=b->link) { + if (lbl || b->npred > 1) { + for (p=0; pnpred; p++) + if (b->pred[p]->id >= b->id) + break; + if (p != b->npred) + fprintf(f, ".p2align 4\n"); + fprintf(f, "%sbb%d:\n", T.asloc, id0+b->id); + } + for (i=b->ins; i!=&b->ins[b->nins]; i++) + emitins(*i, e); + lbl = 1; + switch (b->jmp.type) { + case Jhlt: + fprintf(f, "\tud2\n"); + break; + case Jret0: + if (fn->dynalloc) + fprintf(f, + "\tmovq %%rbp, %%rsp\n" + "\tsubq $%"PRIu64", %%rsp\n", + e->fsz + e->nclob * 8); + for (r=&amd64_sysv_rclob[NCLR_SYSV]; r>amd64_sysv_rclob;) + if (fn->reg & BIT(*--r)) { + itmp.arg[0] = TMP(*r); + emitf("popq %L0", &itmp, e); + } + if (e->fp == RBP) + fputs("\tleave\n", f); + else if (e->fsz) + fprintf(f, + "\taddq $%"PRIu64", %%rsp\n", + e->fsz); + fputs("\tret\n", f); + break; + case Jjmp: + Jmp: + if (b->s1 != b->link) + fprintf(f, "\tjmp %sbb%d\n", + T.asloc, id0+b->s1->id); + else + lbl = 0; + break; + default: + c = b->jmp.type - Jjf; + if (0 <= c && c <= NCmp) { + if (b->link == b->s2) { + s = b->s1; + b->s1 = b->s2; + b->s2 = s; + } else + c = cmpneg(c); + fprintf(f, "\tj%s %sbb%d\n", ctoa[c], + T.asloc, id0+b->s2->id); + goto Jmp; + } + die("unhandled jump %d", b->jmp.type); + } + } + id0 += fn->nblk; + if (!T.apple) + elf_emitfnfin(fn->name, f); +} + +static void +winabi_framesz(E *e) +{ + uint64_t i, o, f; + + /* specific to NAlign == 3 */ + o = 0; + if (!e->fn->leaf) { + for (i=0, o=0; ifn->reg >> amd64_winabi_rclob[i]; + o &= 1; + } + f = e->fn->slot; + f = (f + 3) & -4; + if (f > 0 + && e->fp == RSP + && e->fn->salign == 4) + f += 2; + e->fsz = 4*f + 8*o; +} + 
+void +amd64_winabi_emitfn(Fn *fn, FILE *f) +{ + static char *ctoa[] = { + #define X(c, s, _) [c] = s, + CMP(X) + #undef X + }; + static int id0; + Blk *b, *s; + Ins *i, itmp; + int *r, c, lbl; + E *e; + + e = &(E){.f = f, .fn = fn}; + emitfnlnk(fn->name, &fn->lnk, f); + fputs("\tendbr64\n", f); + if (fn->vararg) { + fprintf(f, "\tmovq %%rcx, 0x8(%%rsp)\n"); + fprintf(f, "\tmovq %%rdx, 0x10(%%rsp)\n"); + fprintf(f, "\tmovq %%r8, 0x18(%%rsp)\n"); + fprintf(f, "\tmovq %%r9, 0x20(%%rsp)\n"); + } + if (!fn->leaf || fn->vararg || fn->dynalloc) { + e->fp = RBP; + fputs("\tpushq %rbp\n\tmovq %rsp, %rbp\n", f); + } else + e->fp = RSP; + winabi_framesz(e); + if (e->fsz) + fprintf(f, "\tsubq $%"PRIu64", %%rsp\n", e->fsz); + for (r=amd64_winabi_rclob; r<&amd64_winabi_rclob[NCLR_WIN]; r++) + if (fn->reg & BIT(*r)) { + itmp.arg[0] = TMP(*r); + emitf("pushq %L0", &itmp, e); + e->nclob++; + } + + for (lbl=0, b=fn->start; b; b=b->link) { + if (lbl || b->npred > 1) + fprintf(f, "%sbb%d:\n", T.asloc, id0+b->id); + for (i=b->ins; i!=&b->ins[b->nins]; i++) + emitins(*i, e); + lbl = 1; + switch (b->jmp.type) { + case Jhlt: + fprintf(f, "\tud2\n"); + break; + case Jret0: + if (fn->dynalloc) + fprintf(f, + "\tmovq %%rbp, %%rsp\n" + "\tsubq $%"PRIu64", %%rsp\n", + e->fsz + e->nclob * 8); + for (r=&amd64_winabi_rclob[NCLR_WIN]; r>amd64_winabi_rclob;) + if (fn->reg & BIT(*--r)) { + itmp.arg[0] = TMP(*r); + emitf("popq %L0", &itmp, e); + } + if (e->fp == RBP) + fputs("\tleave\n", f); + else if (e->fsz) + fprintf(f, + "\taddq $%"PRIu64", %%rsp\n", + e->fsz); + fputs("\tret\n", f); + break; + case Jjmp: + Jmp: + if (b->s1 != b->link) + fprintf(f, "\tjmp %sbb%d\n", + T.asloc, id0+b->s1->id); + else + lbl = 0; + break; + default: + c = b->jmp.type - Jjf; + if (0 <= c && c <= NCmp) { + if (b->link == b->s2) { + s = b->s1; + b->s1 = b->s2; + b->s2 = s; + } else + c = cmpneg(c); + fprintf(f, "\tj%s %sbb%d\n", ctoa[c], + T.asloc, id0+b->s2->id); + goto Jmp; + } + die("unhandled jump %d", 
b->jmp.type); + } + } + id0 += fn->nblk; +} diff --git a/src/qbe/amd64/isel.c b/src/qbe/amd64/isel.c new file mode 100644 index 00000000..889c647e --- /dev/null +++ b/src/qbe/amd64/isel.c @@ -0,0 +1,942 @@ +#include "all.h" +#include + +/* For x86_64, do the following: + * + * - check that constants are used only in + * places allowed + * - ensure immediates always fit in 32b + * - expose machine register contraints + * on instructions like division. + * - implement fast locals (the streak of + * constant allocX in the first basic block) + * - recognize complex addressing modes + * + * Invariant: the use counts that are used + * in sel() must be sound. This + * is not so trivial, maybe the + * dce should be moved out... + */ + +static int amatch(Addr *, Num *, Ref, Fn *); + +static int +noimm(Ref r, Fn *fn) +{ + int64_t val; + + if (rtype(r) != RCon) + return 0; + switch (fn->con[r.val].type) { + case CAddr: + /* we only support the 'small' + * code model of the ABI, this + * means that we can always + * address data with 32bits + */ + return 0; + case CBits: + val = fn->con[r.val].bits.i; + return (val < INT32_MIN || val > INT32_MAX); + default: + die("invalid constant"); + } +} + +static int +rslot(Ref r, Fn *fn) +{ + if (rtype(r) != RTmp) + return -1; + return fn->tmp[r.val].slot; +} + +static int +hascon(Ref r, Con **pc, Fn *fn) +{ + switch (rtype(r)) { + case RCon: + *pc = &fn->con[r.val]; + return 1; + case RMem: + *pc = &fn->mem[r.val].offset; + return 1; + default: + return 0; + } +} + +static void +fixarg(Ref *r, int k, Ins *i, Fn *fn) +{ + char buf[32]; + Addr a, *m; + Con cc, *c; + Ref r0, r1, r2, r3; + int s, n, op; + + r1 = r0 = *r; + s = rslot(r0, fn); + op = i ? 
i->op : Ocopy; + if (KBASE(k) == 1 && rtype(r0) == RCon) { + /* load floating points from memory + * slots, they can't be used as + * immediates + */ + r1 = MEM(fn->nmem); + vgrow(&fn->mem, ++fn->nmem); + memset(&a, 0, sizeof a); + a.offset.type = CAddr; + n = stashbits(fn->con[r0.val].bits.i, KWIDE(k) ? 8 : 4); + /* quote the name so that we do not + * add symbol prefixes on the apple + * target variant + */ + sprintf(buf, "\"%sfp%d\"", T.asloc, n); + a.offset.sym.id = intern(buf); + fn->mem[fn->nmem-1] = a; + } + else if (op == Ocall && r == &i->arg[0] + && rtype(r0) == RCon && fn->con[r0.val].type != CAddr) { + /* use a temporary register so that we + * produce an indirect call + */ + r1 = newtmp("isel", Kl, fn); + emit(Ocopy, Kl, r1, r0, R); + } + else if (op != Ocopy && k == Kl && noimm(r0, fn)) { + /* load constants that do not fit in + * a 32bit signed integer into a + * long temporary + */ + r1 = newtmp("isel", Kl, fn); + emit(Ocopy, Kl, r1, r0, R); + } + else if (s != -1) { + /* load fast locals' addresses into + * temporaries right before the + * instruction + */ + r1 = newtmp("isel", Kl, fn); + emit(Oaddr, Kl, r1, SLOT(s), R); + } + else if (T.apple && hascon(r0, &c, fn) + && c->type == CAddr && c->sym.type == SThr) { + r1 = newtmp("isel", Kl, fn); + if (c->bits.i) { + r2 = newtmp("isel", Kl, fn); + cc = (Con){.type = CBits}; + cc.bits.i = c->bits.i; + r3 = newcon(&cc, fn); + emit(Oadd, Kl, r1, r2, r3); + } else + r2 = r1; + emit(Ocopy, Kl, r2, TMP(RAX), R); + r2 = newtmp("isel", Kl, fn); + r3 = newtmp("isel", Kl, fn); + emit(Ocall, 0, R, r3, CALL(17)); + emit(Ocopy, Kl, TMP(RDI), r2, R); + emit(Oload, Kl, r3, r2, R); + cc = *c; + cc.bits.i = 0; + r3 = newcon(&cc, fn); + emit(Oload, Kl, r2, r3, R); + if (rtype(r0) == RMem) { + m = &fn->mem[r0.val]; + m->offset.type = CUndef; + m->base = r1; + r1 = r0; + } + } + else if (!(isstore(op) && r == &i->arg[1]) + && !isload(op) && op != Ocall && rtype(r0) == RCon + && fn->con[r0.val].type == CAddr) { + /* apple 
as does not support 32-bit + * absolute addressing, use a rip- + * relative leaq instead + */ + r1 = newtmp("isel", Kl, fn); + emit(Oaddr, Kl, r1, r0, R); + } + else if (rtype(r0) == RMem) { + /* eliminate memory operands of + * the form $foo(%rip, ...) + */ + m = &fn->mem[r0.val]; + if (req(m->base, R)) + if (m->offset.type == CAddr) { + r0 = newtmp("isel", Kl, fn); + emit(Oaddr, Kl, r0, newcon(&m->offset, fn), R); + m->offset.type = CUndef; + m->base = r0; + } + } + else if (isxsel(op) && rtype(*r) == RCon) { + r1 = newtmp("isel", i->cls, fn); + emit(Ocopy, i->cls, r1, *r, R); + } + *r = r1; +} + +static void +seladdr(Ref *r, Num *tn, Fn *fn) +{ + Addr a; + Ref r0; + + r0 = *r; + if (rtype(r0) == RTmp) { + memset(&a, 0, sizeof a); + if (!amatch(&a, tn, r0, fn)) + return; + if (!req(a.base, R)) + if (a.offset.type == CAddr) { + /* apple as does not support + * $foo(%r0, %r1, M); try to + * rewrite it or bail out if + * impossible + */ + if (!req(a.index, R) || rtype(a.base) != RTmp) + return; + else { + a.index = a.base; + a.scale = 1; + a.base = R; + } + } + chuse(r0, -1, fn); + vgrow(&fn->mem, ++fn->nmem); + fn->mem[fn->nmem-1] = a; + chuse(a.base, +1, fn); + chuse(a.index, +1, fn); + *r = MEM(fn->nmem-1); + } +} + +static int +cmpswap(Ref arg[2], int op) +{ + switch (op) { + case NCmpI+Cflt: + case NCmpI+Cfle: + return 1; + case NCmpI+Cfgt: + case NCmpI+Cfge: + return 0; + } + return rtype(arg[0]) == RCon; +} + +static void +selcmp(Ref arg[2], int k, int swap, Fn *fn) +{ + Ref r; + Ins *icmp; + + if (swap) { + r = arg[1]; + arg[1] = arg[0]; + arg[0] = r; + } + emit(Oxcmp, k, R, arg[1], arg[0]); + icmp = curi; + if (rtype(arg[0]) == RCon) { + assert(k != Kw); + icmp->arg[1] = newtmp("isel", k, fn); + emit(Ocopy, k, icmp->arg[1], arg[0], R); + fixarg(&curi->arg[0], k, curi, fn); + } + fixarg(&icmp->arg[0], k, icmp, fn); + fixarg(&icmp->arg[1], k, icmp, fn); +} + +static void +sel(Ins i, Num *tn, Fn *fn) +{ + Ref r0, r1, tmp[7]; + int x, j, k, kc, sh, swap; + Ins 
*i0, *i1; + + if (rtype(i.to) == RTmp) + if (!isreg(i.to) && !isreg(i.arg[0]) && !isreg(i.arg[1])) + if (fn->tmp[i.to.val].nuse == 0) { + chuse(i.arg[0], -1, fn); + chuse(i.arg[1], -1, fn); + return; + } + i0 = curi; + k = i.cls; + switch (i.op) { + case Odiv: + case Orem: + case Oudiv: + case Ourem: + if (KBASE(k) == 1) + goto Emit; + if (i.op == Odiv || i.op == Oudiv) + r0 = TMP(RAX), r1 = TMP(RDX); + else + r0 = TMP(RDX), r1 = TMP(RAX); + emit(Ocopy, k, i.to, r0, R); + emit(Ocopy, k, R, r1, R); + if (rtype(i.arg[1]) == RCon) { + /* immediates not allowed for + * divisions in x86 + */ + r0 = newtmp("isel", k, fn); + } else + r0 = i.arg[1]; + if (fn->tmp[r0.val].slot != -1) + err("unlikely argument %%%s in %s", + fn->tmp[r0.val].name, optab[i.op].name); + if (i.op == Odiv || i.op == Orem) { + emit(Oxidiv, k, R, r0, R); + emit(Osign, k, TMP(RDX), TMP(RAX), R); + } else { + emit(Oxdiv, k, R, r0, R); + emit(Ocopy, k, TMP(RDX), CON_Z, R); + } + emit(Ocopy, k, TMP(RAX), i.arg[0], R); + fixarg(&curi->arg[0], k, curi, fn); + if (rtype(i.arg[1]) == RCon) + emit(Ocopy, k, r0, i.arg[1], R); + break; + case Osar: + case Oshr: + case Oshl: + r0 = i.arg[1]; + if (rtype(r0) == RCon) + goto Emit; + if (fn->tmp[r0.val].slot != -1) + err("unlikely argument %%%s in %s", + fn->tmp[r0.val].name, optab[i.op].name); + i.arg[1] = TMP(RCX); + emit(Ocopy, Kw, R, TMP(RCX), R); + emiti(i); + i1 = curi; + emit(Ocopy, Kw, TMP(RCX), r0, R); + fixarg(&i1->arg[0], argcls(&i, 0), i1, fn); + break; + case Ouwtof: + r0 = newtmp("utof", Kl, fn); + emit(Osltof, k, i.to, r0, R); + emit(Oextuw, Kl, r0, i.arg[0], R); + fixarg(&curi->arg[0], k, curi, fn); + break; + case Oultof: + /* %mask =l and %arg.0, 1 + * %isbig =l shr %arg.0, 63 + * %divided =l shr %arg.0, %isbig + * %or =l or %mask, %divided + * %float =d sltof %or + * %cast =l cast %float + * %addend =l shl %isbig, 52 + * %sum =l add %cast, %addend + * %result =d cast %sum + */ + r0 = newtmp("utof", k, fn); + if (k == Ks) + kc = Kw, sh = 23; + 
else + kc = Kl, sh = 52; + for (j=0; j<4; j++) + tmp[j] = newtmp("utof", Kl, fn); + for (; j<7; j++) + tmp[j] = newtmp("utof", kc, fn); + emit(Ocast, k, i.to, tmp[6], R); + emit(Oadd, kc, tmp[6], tmp[4], tmp[5]); + emit(Oshl, kc, tmp[5], tmp[1], getcon(sh, fn)); + emit(Ocast, kc, tmp[4], r0, R); + emit(Osltof, k, r0, tmp[3], R); + emit(Oor, Kl, tmp[3], tmp[0], tmp[2]); + emit(Oshr, Kl, tmp[2], i.arg[0], tmp[1]); + sel(*curi++, 0, fn); + emit(Oshr, Kl, tmp[1], i.arg[0], getcon(63, fn)); + fixarg(&curi->arg[0], Kl, curi, fn); + emit(Oand, Kl, tmp[0], i.arg[0], getcon(1, fn)); + fixarg(&curi->arg[0], Kl, curi, fn); + break; + case Ostoui: + i.op = Ostosi; + kc = Ks; + tmp[4] = getcon(0xdf000000, fn); + goto Oftoui; + case Odtoui: + i.op = Odtosi; + kc = Kd; + tmp[4] = getcon(0xc3e0000000000000, fn); + Oftoui: + if (k == Kw) { + r0 = newtmp("ftou", Kl, fn); + emit(Ocopy, Kw, i.to, r0, R); + i.cls = Kl; + i.to = r0; + goto Emit; + } + /* %try0 =l {s,d}tosi %fp + * %mask =l sar %try0, 63 + * + * mask is all ones if the first + * try was oob, all zeroes o.w. 
+ * + * %fps ={s,d} sub %fp, (1<<63) + * %try1 =l {s,d}tosi %fps + * + * %tmp =l and %mask, %try1 + * %res =l or %tmp, %try0 + */ + r0 = newtmp("ftou", kc, fn); + for (j=0; j<4; j++) + tmp[j] = newtmp("ftou", Kl, fn); + emit(Oor, Kl, i.to, tmp[0], tmp[3]); + emit(Oand, Kl, tmp[3], tmp[2], tmp[1]); + emit(i.op, Kl, tmp[2], r0, R); + emit(Oadd, kc, r0, tmp[4], i.arg[0]); + i1 = curi; /* fixarg() can change curi */ + fixarg(&i1->arg[0], kc, i1, fn); + fixarg(&i1->arg[1], kc, i1, fn); + emit(Osar, Kl, tmp[1], tmp[0], getcon(63, fn)); + emit(i.op, Kl, tmp[0], i.arg[0], R); + fixarg(&curi->arg[0], Kl, curi, fn); + break; + case Onop: + break; + case Ostored: + case Ostores: + case Ostorel: + case Ostorew: + case Ostoreh: + case Ostoreb: + if (rtype(i.arg[0]) == RCon) { + if (i.op == Ostored) + i.op = Ostorel; + if (i.op == Ostores) + i.op = Ostorew; + } + seladdr(&i.arg[1], tn, fn); + goto Emit; + case_Oload: + seladdr(&i.arg[0], tn, fn); + goto Emit; + case Odbgloc: + case Ocall: + case Osalloc: + case Ocopy: + case Oadd: + case Osub: + case Oneg: + case Omul: + case Oand: + case Oor: + case Oxor: + case Oxtest: + case Ostosi: + case Odtosi: + case Oswtof: + case Osltof: + case Oexts: + case Otruncd: + case Ocast: + case_Oxsel: + case_Oext: +Emit: + emiti(i); + i1 = curi; /* fixarg() can change curi */ + fixarg(&i1->arg[0], argcls(&i, 0), i1, fn); + fixarg(&i1->arg[1], argcls(&i, 1), i1, fn); + break; + case Oalloc4: + case Oalloc8: + case Oalloc16: + salloc(i.to, i.arg[0], fn); + break; + default: + if (isext(i.op)) + goto case_Oext; + if (isxsel(i.op)) + goto case_Oxsel; + if (isload(i.op)) + goto case_Oload; + if (iscmp(i.op, &kc, &x)) { + switch (x) { + case NCmpI+Cfeq: + /* zf is set when operands are + * unordered, so we may have to + * check pf + */ + r0 = newtmp("isel", Kw, fn); + r1 = newtmp("isel", Kw, fn); + emit(Oand, Kw, i.to, r0, r1); + emit(Oflagfo, k, r1, R, R); + i.to = r0; + break; + case NCmpI+Cfne: + r0 = newtmp("isel", Kw, fn); + r1 = newtmp("isel", 
Kw, fn); + emit(Oor, Kw, i.to, r0, r1); + emit(Oflagfuo, k, r1, R, R); + i.to = r0; + break; + } + swap = cmpswap(i.arg, x); + if (swap) + x = cmpop(x); + emit(Oflag+x, k, i.to, R, R); + selcmp(i.arg, kc, swap, fn); + break; + } + die("unknown instruction %s", optab[i.op].name); + } + + while (i0>curi && --i0) { + assert(rslot(i0->arg[0], fn) == -1); + assert(rslot(i0->arg[1], fn) == -1); + } +} + +static Ins * +flagi(Ins *i0, Ins *i) +{ + while (i>i0) { + i--; + if (amd64_op[i->op].zflag) + return i; + if (amd64_op[i->op].lflag) + continue; + return 0; + } + return 0; +} + +static Ins* +selsel(Fn *fn, Blk *b, Ins *i, Num *tn) +{ + Ref r, cr[2]; + int c, k, swap, gencmp, gencpy; + Ins *isel0, *isel1, *fi; + Tmp *t; + + assert(i->op == Osel1); + for (isel0=i; b->insop == Osel0) + break; + assert(isel0->op == Osel1); + } + assert(isel0->op == Osel0); + r = isel0->arg[0]; + assert(rtype(r) == RTmp); + t = &fn->tmp[r.val]; + fi = flagi(b->ins, isel0); + cr[0] = cr[1] = R; + gencmp = gencpy = swap = 0; + k = Kw; + c = Cine; + if (!fi || !req(fi->to, r)) { + gencmp = 1; + cr[0] = r; + cr[1] = CON_Z; + } + else if (iscmp(fi->op, &k, &c)) { + if (c == NCmpI+Cfeq + || c == NCmpI+Cfne) { + /* these are selected as 'and' + * or 'or', so we check their + * result with Cine + */ + c = Cine; + goto Other; + } + swap = cmpswap(fi->arg, c); + if (swap) + c = cmpop(c); + if (t->nuse == 1) { + gencmp = 1; + cr[0] = fi->arg[0]; + cr[1] = fi->arg[1]; + *fi = (Ins){.op = Onop}; + } + } + else if (fi->op == Oand && t->nuse == 1 + && (rtype(fi->arg[0]) == RTmp || + rtype(fi->arg[1]) == RTmp)) { + fi->op = Oxtest; + fi->to = R; + if (rtype(fi->arg[1]) == RCon) { + r = fi->arg[1]; + fi->arg[1] = fi->arg[0]; + fi->arg[0] = r; + } + } + else { + Other: + /* since flags are not tracked in liveness, + * the result of the flag-setting instruction + * has to be marked as live + */ + if (t->nuse == 1) + gencpy = 1; + } + /* generate conditional moves */ + for (isel1=i; isel0op = Oxsel+c; + 
sel(*isel1, tn, fn); + } + assert(!gencmp || !gencpy); + if (gencmp) + selcmp(cr, k, swap, fn); + if (gencpy) + emit(Ocopy, Kw, R, r, R); + *isel0 = (Ins){.op = Onop}; + return isel0; +} + +static void +seljmp(Blk *b, Fn *fn) +{ + Ref r; + int c, k, swap; + Ins *fi; + Tmp *t; + + if (b->jmp.type == Jret0 + || b->jmp.type == Jjmp + || b->jmp.type == Jhlt) + return; + assert(b->jmp.type == Jjnz); + r = b->jmp.arg; + t = &fn->tmp[r.val]; + b->jmp.arg = R; + assert(rtype(r) == RTmp); + if (b->s1 == b->s2) { + chuse(r, -1, fn); + b->jmp.type = Jjmp; + b->s2 = 0; + return; + } + fi = flagi(b->ins, &b->ins[b->nins]); + if (!fi || !req(fi->to, r)) { + selcmp((Ref[2]){r, CON_Z}, Kw, 0, fn); + b->jmp.type = Jjf + Cine; + } + else if (iscmp(fi->op, &k, &c) + && c != NCmpI+Cfeq /* see sel(), selsel() */ + && c != NCmpI+Cfne) { + swap = cmpswap(fi->arg, c); + if (swap) + c = cmpop(c); + if (t->nuse == 1) { + selcmp(fi->arg, k, swap, fn); + *fi = (Ins){.op = Onop}; + } + b->jmp.type = Jjf + c; + } + else if (fi->op == Oand && t->nuse == 1 + && (rtype(fi->arg[0]) == RTmp || + rtype(fi->arg[1]) == RTmp)) { + fi->op = Oxtest; + fi->to = R; + b->jmp.type = Jjf + Cine; + if (rtype(fi->arg[1]) == RCon) { + r = fi->arg[1]; + fi->arg[1] = fi->arg[0]; + fi->arg[0] = r; + } + } + else { + /* since flags are not tracked in liveness, + * the result of the flag-setting instruction + * has to be marked as live + */ + if (t->nuse == 1) + emit(Ocopy, Kw, R, r, R); + b->jmp.type = Jjf + Cine; + } +} + +enum { + Pob, + Pbis, + Pois, + Pobis, + Pbi1, + Pobi1, +}; + +/* mgen generated code + * + * (with-vars (o b i s) + * (patterns + * (ob (add (con o) (tmp b))) + * (bis (add (tmp b) (mul (tmp i) (con s 1 2 4 8)))) + * (ois (add (con o) (mul (tmp i) (con s 1 2 4 8)))) + * (obis (add (con o) (tmp b) (mul (tmp i) (con s 1 2 4 8)))) + * (bi1 (add (tmp b) (tmp i))) + * (obi1 (add (con o) (tmp b) (tmp i))) + * )) + */ + +static int +opn(int op, int l, int r) +{ + static uchar Oaddtbl[91] = { + 2, + 2,2, 
+ 4,4,5, + 6,6,8,8, + 4,4,9,10,9, + 7,7,5,8,9,5, + 4,4,12,10,12,12,12, + 4,4,9,10,9,9,12,9, + 11,11,5,8,9,5,12,9,5, + 7,7,5,8,9,5,12,9,5,5, + 11,11,5,8,9,5,12,9,5,5,5, + 4,4,9,10,9,9,12,9,9,9,9,9, + 7,7,5,8,9,5,12,9,5,5,5,9,5, + }; + int t; + + if (l < r) + t = l, l = r, r = t; + switch (op) { + case Omul: + if (2 <= l) + if (r == 0) { + return 3; + } + return 2; + case Oadd: + return Oaddtbl[(l + l*l)/2 + r]; + default: + return 2; + } +} + +static int +refn(Ref r, Num *tn, Con *con) +{ + int64_t n; + + switch (rtype(r)) { + case RTmp: + if (!tn[r.val].n) + tn[r.val].n = 2; + return tn[r.val].n; + case RCon: + if (con[r.val].type != CBits) + return 1; + n = con[r.val].bits.i; + if (n == 8 || n == 4 || n == 2 || n == 1) + return 0; + return 1; + default: + return INT_MIN; + } +} + +static bits match[13] = { + [4] = BIT(Pob), + [5] = BIT(Pbi1), + [6] = BIT(Pob) | BIT(Pois), + [7] = BIT(Pob) | BIT(Pobi1), + [8] = BIT(Pbi1) | BIT(Pbis), + [9] = BIT(Pbi1) | BIT(Pobi1), + [10] = BIT(Pbi1) | BIT(Pbis) | BIT(Pobi1) | BIT(Pobis), + [11] = BIT(Pob) | BIT(Pobi1) | BIT(Pobis), + [12] = BIT(Pbi1) | BIT(Pobi1) | BIT(Pobis), +}; + +static uchar *matcher[] = { + [Pbi1] = (uchar[]){ + 1,3,1,3,2,0 + }, + [Pbis] = (uchar[]){ + 5,1,8,5,27,1,5,1,2,5,13,3,1,1,3,3,3,2,0,1, + 3,3,3,2,3,1,0,1,29 + }, + [Pob] = (uchar[]){ + 1,3,0,3,1,0 + }, + [Pobi1] = (uchar[]){ + 5,3,9,9,10,33,12,35,45,1,5,3,11,9,7,9,4,9, + 17,1,3,0,3,1,3,2,0,3,1,1,3,0,34,1,37,1,5,2, + 5,7,2,7,8,37,29,1,3,0,1,32 + }, + [Pobis] = (uchar[]){ + 5,2,10,7,11,19,49,1,1,3,3,3,2,1,3,0,3,1,0, + 1,3,0,5,1,8,5,25,1,5,1,2,5,13,3,1,1,3,3,3, + 2,0,1,3,3,3,2,26,1,51,1,5,1,6,5,9,1,3,0,51, + 3,1,1,3,0,45 + }, + [Pois] = (uchar[]){ + 1,3,0,1,3,3,3,2,0 + }, +}; + +/* end of generated code */ + +static void +anumber(Num *tn, Blk *b, Con *con) +{ + Ins *i; + Num *n; + + for (i=b->ins; i<&b->ins[b->nins]; i++) { + if (rtype(i->to) != RTmp) + continue; + n = &tn[i->to.val]; + n->l = i->arg[0]; + n->r = i->arg[1]; + n->nl = refn(n->l, tn, con); 
+ n->nr = refn(n->r, tn, con); + n->n = opn(i->op, n->nl, n->nr); + } +} + +static Ref +adisp(Con *c, Num *tn, Ref r, Fn *fn, int s) +{ + Ref v[2]; + int n; + + while (!req(r, R)) { + assert(rtype(r) == RTmp); + n = refn(r, tn, fn->con); + if (!(match[n] & BIT(Pob))) + break; + runmatch(matcher[Pob], tn, r, v); + assert(rtype(v[0]) == RCon); + addcon(c, &fn->con[v[0].val], s); + r = v[1]; + } + return r; +} + +static int +amatch(Addr *a, Num *tn, Ref r, Fn *fn) +{ + static int pat[] = {Pobis, Pobi1, Pbis, Pois, Pbi1, -1}; + Ref ro, rb, ri, rs, v[4]; + Con *c, co; + int s, n, *p; + + if (rtype(r) != RTmp) + return 0; + + n = refn(r, tn, fn->con); + memset(v, 0, sizeof v); + for (p=pat; *p>=0; p++) + if (match[n] & BIT(*p)) { + runmatch(matcher[*p], tn, r, v); + break; + } + if (*p < 0) + v[1] = r; + + memset(&co, 0, sizeof co); + ro = v[0]; + rb = adisp(&co, tn, v[1], fn, 1); + ri = v[2]; + rs = v[3]; + s = 1; + + if (*p < 0 && co.type != CUndef) + if (amatch(a, tn, rb, fn)) + return addcon(&a->offset, &co, 1); + if (!req(ro, R)) { + assert(rtype(ro) == RCon); + c = &fn->con[ro.val]; + if (!addcon(&co, c, 1)) + return 0; + } + if (!req(rs, R)) { + assert(rtype(rs) == RCon); + c = &fn->con[rs.val]; + assert(c->type == CBits); + s = c->bits.i; + } + ri = adisp(&co, tn, ri, fn, s); + *a = (Addr){co, rb, ri, s}; + + if (rtype(ri) == RTmp) + if (fn->tmp[ri.val].slot != -1) { + if (a->scale != 1 + || fn->tmp[rb.val].slot != -1) + return 0; + a->base = ri; + a->index = rb; + } + if (!req(a->base, R)) { + assert(rtype(a->base) == RTmp); + s = fn->tmp[a->base.val].slot; + if (s != -1) + a->base = SLOT(s); + } + return 1; +} + +/* instruction selection + * requires use counts (as given by parsing) + */ +void +amd64_isel(Fn *fn) +{ + Blk *b, **sb; + Ins *i; + Phi *p; + uint a; + int n, al; + int64_t sz; + Num *num; + + /* assign slots to fast allocs */ + b = fn->start; + /* specific to NAlign == 3 */ /* or change n=4 and sz /= 4 below */ + for (al=Oalloc, n=4; al<=Oalloc1; 
al++, n*=2) + for (i=b->ins; i<&b->ins[b->nins]; i++) + if (i->op == al) { + if (rtype(i->arg[0]) != RCon) + break; + sz = fn->con[i->arg[0].val].bits.i; + if (sz < 0 || sz >= INT_MAX-15) + err("invalid alloc size %"PRId64, sz); + sz = (sz + n-1) & -n; + sz /= 4; + if (sz > INT_MAX - fn->slot) + die("alloc too large"); + fn->tmp[i->to.val].slot = fn->slot; + fn->slot += sz; + fn->salign = 2 + al - Oalloc; + *i = (Ins){.op = Onop}; + } + + /* process basic blocks */ + n = fn->ntmp; + num = emalloc(n * sizeof num[0]); + for (b=fn->start; b; b=b->link) { + curi = &insb[NIns]; + for (sb=(Blk*[3]){b->s1, b->s2, 0}; *sb; sb++) + for (p=(*sb)->phi; p; p=p->link) { + for (a=0; p->blk[a] != b; a++) + assert(a+1 < p->narg); + fixarg(&p->arg[a], p->cls, 0, fn); + } + memset(num, 0, n * sizeof num[0]); + anumber(num, b, fn->con); + seljmp(b, fn); + for (i=&b->ins[b->nins]; i!=b->ins;) { + --i; + assert(i->op != Osel0); + if (i->op == Osel1) + i = selsel(fn, b, i, num); + else + sel(*i, num, fn); + } + idup(b, curi, &insb[NIns]-curi); + } + free(num); + + if (debug['I']) { + fprintf(stderr, "\n> After instruction selection:\n"); + printfn(fn, stderr); + } +} diff --git a/src/qbe/amd64/sysv.c b/src/qbe/amd64/sysv.c new file mode 100644 index 00000000..98964c90 --- /dev/null +++ b/src/qbe/amd64/sysv.c @@ -0,0 +1,721 @@ +#include "all.h" + +typedef struct AClass AClass; +typedef struct RAlloc RAlloc; + +struct AClass { + Typ *type; + int inmem; + int align; + uint size; + int cls[2]; + Ref ref[2]; +}; + +struct RAlloc { + Ins i; + RAlloc *link; +}; + +static void +classify(AClass *a, Typ *t, uint s) +{ + Field *f; + int *cls; + uint n, s1; + + for (n=0, s1=s; nnunion; n++, s=s1) + for (f=t->fields[n]; f->type!=FEnd; f++) { + assert(s <= 16); + cls = &a->cls[s/8]; + switch (f->type) { + case FEnd: + die("unreachable"); + case FPad: + /* don't change anything */ + s += f->len; + break; + case Fs: + case Fd: + if (*cls == Kx) + *cls = Kd; + s += f->len; + break; + case Fb: + case Fh: 
+ case Fw: + case Fl: + *cls = Kl; + s += f->len; + break; + case FTyp: + classify(a, &typ[f->len], s); + s += typ[f->len].size; + break; + } + } +} + +static void +typclass(AClass *a, Typ *t) +{ + uint sz, al; + + sz = t->size; + al = 1u << t->align; + + /* the ABI requires sizes to be rounded + * up to the nearest multiple of 8, moreover + * it makes it easy load and store structures + * in registers + */ + if (al < 8) + al = 8; + sz = (sz + al-1) & -al; + + a->type = t; + a->size = sz; + a->align = t->align; + + if (t->isdark || sz > 16 || sz == 0) { + /* large or unaligned structures are + * required to be passed in memory + */ + a->inmem = 1; + return; + } + + a->cls[0] = Kx; + a->cls[1] = Kx; + a->inmem = 0; + classify(a, t, 0); +} + +static int +retr(Ref reg[2], AClass *aret) +{ + static int retreg[2][2] = {{RAX, RDX}, {XMM0, XMM0+1}}; + int n, k, ca, nr[2]; + + nr[0] = nr[1] = 0; + ca = 0; + for (n=0; (uint)n*8size; n++) { + k = KBASE(aret->cls[n]); + reg[n] = TMP(retreg[k][nr[k]++]); + ca += 1 << (2 * k); + } + return ca; +} + +static void +selret(Blk *b, Fn *fn) +{ + int j, k, ca; + Ref r, r0, reg[2]; + AClass aret; + + j = b->jmp.type; + + if (!isret(j) || j == Jret0) + return; + + r0 = b->jmp.arg; + b->jmp.type = Jret0; + + if (j == Jretc) { + typclass(&aret, &typ[fn->retty]); + if (aret.inmem) { + assert(rtype(fn->retr) == RTmp); + emit(Ocopy, Kl, TMP(RAX), fn->retr, R); + emit(Oblit1, 0, R, INT(aret.type->size), R); + emit(Oblit0, 0, R, r0, fn->retr); + ca = 1; + } else { + ca = retr(reg, &aret); + if (aret.size > 8) { + r = newtmp("abi", Kl, fn); + emit(Oload, Kl, reg[1], r, R); + emit(Oadd, Kl, r, r0, getcon(8, fn)); + } + emit(Oload, Kl, reg[0], r0, R); + } + } else { + k = j - Jretw; + if (KBASE(k) == 0) { + emit(Ocopy, k, TMP(RAX), r0, R); + ca = 1; + } else { + emit(Ocopy, k, TMP(XMM0), r0, R); + ca = 1 << 2; + } + } + + b->jmp.arg = CALL(ca); +} + +static int +argsclass(Ins *i0, Ins *i1, AClass *ac, int op, AClass *aret, Ref *env) +{ + int 
varc, envc, nint, ni, nsse, ns, n, *pn; + AClass *a; + Ins *i; + + if (aret && aret->inmem) + nint = 5; /* hidden argument */ + else + nint = 6; + nsse = 8; + varc = 0; + envc = 0; + for (i=i0, a=ac; iop - op + Oarg) { + case Oarg: + if (KBASE(i->cls) == 0) + pn = &nint; + else + pn = &nsse; + if (*pn > 0) { + --*pn; + a->inmem = 0; + } else + a->inmem = 2; + a->align = 3; + a->size = 8; + a->cls[0] = i->cls; + break; + case Oargc: + n = i->arg[0].val; + typclass(a, &typ[n]); + if (a->inmem) + continue; + ni = ns = 0; + for (n=0; (uint)n*8size; n++) + if (KBASE(a->cls[n]) == 0) + ni++; + else + ns++; + if (nint >= ni && nsse >= ns) { + nint -= ni; + nsse -= ns; + } else + a->inmem = 1; + break; + case Oarge: + envc = 1; + if (op == Opar) + *env = i->to; + else + *env = i->arg[0]; + break; + case Oargv: + varc = 1; + break; + default: + die("unreachable"); + } + + if (varc && envc) + err("sysv abi does not support variadic env calls"); + + return ((varc|envc) << 12) | ((6-nint) << 4) | ((8-nsse) << 8); +} + +int amd64_sysv_rsave[] = { + RDI, RSI, RDX, RCX, R8, R9, R10, R11, RAX, + XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7, + XMM8, XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, -1 +}; +int amd64_sysv_rclob[] = {RBX, R12, R13, R14, R15, -1}; + +MAKESURE(sysv_arrays_ok, + sizeof amd64_sysv_rsave == (NGPS_SYSV+NFPS+1) * sizeof(int) && + sizeof amd64_sysv_rclob == (NCLR_SYSV+1) * sizeof(int) +); + +/* layout of call's second argument (RCall) + * + * 29 12 8 4 3 0 + * |0...00|x|xxxx|xxxx|xx|xx| range + * | | | | ` gp regs returned (0..2) + * | | | ` sse regs returned (0..2) + * | | ` gp regs passed (0..6) + * | ` sse regs passed (0..8) + * ` 1 if rax is used to pass data (0..1) + */ + +bits +amd64_sysv_retregs(Ref r, int p[2]) +{ + bits b; + int ni, nf; + + assert(rtype(r) == RCall); + b = 0; + ni = r.val & 3; + nf = (r.val >> 2) & 3; + if (ni >= 1) + b |= BIT(RAX); + if (ni >= 2) + b |= BIT(RDX); + if (nf >= 1) + b |= BIT(XMM0); + if (nf >= 2) + b |= BIT(XMM1); + if (p) { 
+ p[0] = ni; + p[1] = nf; + } + return b; +} + +bits +amd64_sysv_argregs(Ref r, int p[2]) +{ + bits b; + int j, ni, nf, ra; + + assert(rtype(r) == RCall); + b = 0; + ni = (r.val >> 4) & 15; + nf = (r.val >> 8) & 15; + ra = (r.val >> 12) & 1; + for (j=0; jarg[1], R)) { + assert(rtype(i1->arg[1]) == RType); + typclass(&aret, &typ[i1->arg[1].val]); + ca = argsclass(i0, i1, ac, Oarg, &aret, &env); + } else + ca = argsclass(i0, i1, ac, Oarg, 0, &env); + + for (stk=0, a=&ac[i1-i0]; a>ac;) + if ((--a)->inmem) { + if (a->align > 4) + err("sysv abi requires alignments of 16 or less"); + stk += a->size; + if (a->align == 4) + stk += stk & 15; + } + stk += stk & 15; + if (stk) { + r = getcon(-(int64_t)stk, fn); + emit(Osalloc, Kl, R, r, R); + } + + if (!req(i1->arg[1], R)) { + if (aret.inmem) { + /* get the return location from eax + * it saves one callee-save reg */ + r1 = newtmp("abi", Kl, fn); + emit(Ocopy, Kl, i1->to, TMP(RAX), R); + ca += 1; + } else { + /* todo, may read out of bounds. + * gcc did this up until 5.2, but + * this should still be fixed. + */ + if (aret.size > 8) { + r = newtmp("abi", Kl, fn); + aret.ref[1] = newtmp("abi", aret.cls[1], fn); + emit(Ostorel, 0, R, aret.ref[1], r); + emit(Oadd, Kl, r, i1->to, getcon(8, fn)); + } + aret.ref[0] = newtmp("abi", aret.cls[0], fn); + emit(Ostorel, 0, R, aret.ref[0], i1->to); + ca += retr(reg, &aret); + if (aret.size > 8) + emit(Ocopy, aret.cls[1], aret.ref[1], reg[1], R); + emit(Ocopy, aret.cls[0], aret.ref[0], reg[0], R); + r1 = i1->to; + } + /* allocate return pad */ + ra = alloc(sizeof *ra); + /* specific to NAlign == 3 */ + al = aret.align >= 2 ? 
aret.align - 2 : 0; + ra->i = (Ins){Oalloc+al, Kl, r1, {getcon(aret.size, fn)}}; + ra->link = (*rap); + *rap = ra; + } else { + ra = 0; + if (KBASE(i1->cls) == 0) { + emit(Ocopy, i1->cls, i1->to, TMP(RAX), R); + ca += 1; + } else { + emit(Ocopy, i1->cls, i1->to, TMP(XMM0), R); + ca += 1 << 2; + } + } + + emit(Ocall, i1->cls, R, i1->arg[0], CALL(ca)); + + if (!req(R, env)) + emit(Ocopy, Kl, TMP(RAX), env, R); + else if ((ca >> 12) & 1) /* vararg call */ + emit(Ocopy, Kw, TMP(RAX), getcon((ca >> 8) & 15, fn), R); + + ni = ns = 0; + if (ra && aret.inmem) + emit(Ocopy, Kl, rarg(Kl, &ni, &ns), ra->i.to, R); /* pass hidden argument */ + + for (i=i0, a=ac; iop >= Oarge || a->inmem) + continue; + r1 = rarg(a->cls[0], &ni, &ns); + if (i->op == Oargc) { + if (a->size > 8) { + r2 = rarg(a->cls[1], &ni, &ns); + r = newtmp("abi", Kl, fn); + emit(Oload, a->cls[1], r2, r, R); + emit(Oadd, Kl, r, i->arg[1], getcon(8, fn)); + } + emit(Oload, a->cls[0], r1, i->arg[1], R); + } else + emit(Ocopy, i->cls, r1, i->arg[0], R); + } + + if (!stk) + return; + + r = newtmp("abi", Kl, fn); + for (i=i0, a=ac, off=0; iop >= Oarge || !a->inmem) + continue; + r1 = newtmp("abi", Kl, fn); + if (i->op == Oargc) { + if (a->align == 4) + off += off & 15; + emit(Oblit1, 0, R, INT(a->type->size), R); + emit(Oblit0, 0, R, i->arg[1], r1); + } else + emit(Ostorel, 0, R, i->arg[0], r1); + emit(Oadd, Kl, r1, r, getcon(off, fn)); + off += a->size; + } + emit(Osalloc, Kl, r, getcon(stk, fn), R); +} + +static int +selpar(Fn *fn, Ins *i0, Ins *i1) +{ + AClass *ac, *a, aret; + Ins *i; + int ni, ns, s, al, fa; + Ref r, env; + + env = R; + ac = alloc((i1-i0) * sizeof ac[0]); + curi = &insb[NIns]; + ni = ns = 0; + + if (fn->retty >= 0) { + typclass(&aret, &typ[fn->retty]); + fa = argsclass(i0, i1, ac, Opar, &aret, &env); + } else + fa = argsclass(i0, i1, ac, Opar, 0, &env); + fn->reg = amd64_sysv_argregs(CALL(fa), 0); + + for (i=i0, a=ac; iop != Oparc || a->inmem) + continue; + if (a->size > 8) { + r = newtmp("abi", 
Kl, fn); + a->ref[1] = newtmp("abi", Kl, fn); + emit(Ostorel, 0, R, a->ref[1], r); + emit(Oadd, Kl, r, i->to, getcon(8, fn)); + } + a->ref[0] = newtmp("abi", Kl, fn); + emit(Ostorel, 0, R, a->ref[0], i->to); + /* specific to NAlign == 3 */ + al = a->align >= 2 ? a->align - 2 : 0; + emit(Oalloc+al, Kl, i->to, getcon(a->size, fn), R); + } + + if (fn->retty >= 0 && aret.inmem) { + r = newtmp("abi", Kl, fn); + emit(Ocopy, Kl, r, rarg(Kl, &ni, &ns), R); + fn->retr = r; + } + + for (i=i0, a=ac, s=4; iinmem) { + case 1: + if (a->align > 4) + err("sysv abi requires alignments of 16 or less"); + if (a->align == 4) + s = (s+3) & -4; + fn->tmp[i->to.val].slot = -s; + s += a->size / 4; + continue; + case 2: + emit(Oload, i->cls, i->to, SLOT(-s), R); + s += 2; + continue; + } + if (i->op == Opare) + continue; + r = rarg(a->cls[0], &ni, &ns); + if (i->op == Oparc) { + emit(Ocopy, a->cls[0], a->ref[0], r, R); + if (a->size > 8) { + r = rarg(a->cls[1], &ni, &ns); + emit(Ocopy, a->cls[1], a->ref[1], r, R); + } + } else + emit(Ocopy, i->cls, i->to, r, R); + } + + if (!req(R, env)) + emit(Ocopy, Kl, env, TMP(RAX), R); + + return fa | (s*4)<<12; +} + +static Blk * +split(Fn *fn, Blk *b) +{ + Blk *bn; + + ++fn->nblk; + bn = newblk(); + idup(bn, curi, &insb[NIns]-curi); + curi = &insb[NIns]; + bn->visit = ++b->visit; + strf(bn->name, "%s.%d", b->name, b->visit); + bn->loop = b->loop; + bn->link = b->link; + b->link = bn; + return bn; +} + +static void +chpred(Blk *b, Blk *bp, Blk *bp1) +{ + Phi *p; + uint a; + + for (p=b->phi; p; p=p->link) { + for (a=0; p->blk[a]!=bp; a++) + assert(a+1narg); + p->blk[a] = bp1; + } +} + +static void +selvaarg(Fn *fn, Blk *b, Ins *i) +{ + Ref loc, lreg, lstk, nr, r0, r1, c4, c8, c16, c, ap; + Blk *b0, *bstk, *breg; + int isint; + + c4 = getcon(4, fn); + c8 = getcon(8, fn); + c16 = getcon(16, fn); + ap = i->arg[0]; + isint = KBASE(i->cls) == 0; + + /* @b [...] 
+ r0 =l add ap, (0 or 4) + nr =l loadsw r0 + r1 =w cultw nr, (48 or 176) + jnz r1, @breg, @bstk + @breg + r0 =l add ap, 16 + r1 =l loadl r0 + lreg =l add r1, nr + r0 =w add nr, (8 or 16) + r1 =l add ap, (0 or 4) + storew r0, r1 + @bstk + r0 =l add ap, 8 + lstk =l loadl r0 + r1 =l add lstk, 8 + storel r1, r0 + @b0 + %loc =l phi @breg %lreg, @bstk %lstk + i->to =(i->cls) load %loc + */ + + loc = newtmp("abi", Kl, fn); + emit(Oload, i->cls, i->to, loc, R); + b0 = split(fn, b); + b0->jmp = b->jmp; + b0->s1 = b->s1; + b0->s2 = b->s2; + if (b->s1) + chpred(b->s1, b, b0); + if (b->s2 && b->s2 != b->s1) + chpred(b->s2, b, b0); + + lreg = newtmp("abi", Kl, fn); + nr = newtmp("abi", Kl, fn); + r0 = newtmp("abi", Kw, fn); + r1 = newtmp("abi", Kl, fn); + emit(Ostorew, Kw, R, r0, r1); + emit(Oadd, Kl, r1, ap, isint ? CON_Z : c4); + emit(Oadd, Kw, r0, nr, isint ? c8 : c16); + r0 = newtmp("abi", Kl, fn); + r1 = newtmp("abi", Kl, fn); + emit(Oadd, Kl, lreg, r1, nr); + emit(Oload, Kl, r1, r0, R); + emit(Oadd, Kl, r0, ap, c16); + breg = split(fn, b); + breg->jmp.type = Jjmp; + breg->s1 = b0; + + lstk = newtmp("abi", Kl, fn); + r0 = newtmp("abi", Kl, fn); + r1 = newtmp("abi", Kl, fn); + emit(Ostorel, Kw, R, r1, r0); + emit(Oadd, Kl, r1, lstk, c8); + emit(Oload, Kl, lstk, r0, R); + emit(Oadd, Kl, r0, ap, c8); + bstk = split(fn, b); + bstk->jmp.type = Jjmp; + bstk->s1 = b0; + + b0->phi = alloc(sizeof *b0->phi); + *b0->phi = (Phi){ + .cls = Kl, .to = loc, + .narg = 2, + .blk = vnew(2, sizeof b0->phi->blk[0], PFn), + .arg = vnew(2, sizeof b0->phi->arg[0], PFn), + }; + b0->phi->blk[0] = bstk; + b0->phi->blk[1] = breg; + b0->phi->arg[0] = lstk; + b0->phi->arg[1] = lreg; + r0 = newtmp("abi", Kl, fn); + r1 = newtmp("abi", Kw, fn); + b->jmp.type = Jjnz; + b->jmp.arg = r1; + b->s1 = breg; + b->s2 = bstk; + c = getcon(isint ? 48 : 176, fn); + emit(Ocmpw+Ciult, Kw, r1, nr, c); + emit(Oloadsw, Kl, nr, r0, R); + emit(Oadd, Kl, r0, ap, isint ? 
CON_Z : c4); +} + +static void +selvastart(Fn *fn, int fa, Ref ap) +{ + Ref r0, r1; + int gp, fp, sp; + + gp = ((fa >> 4) & 15) * 8; + fp = 48 + ((fa >> 8) & 15) * 16; + sp = fa >> 12; + r0 = newtmp("abi", Kl, fn); + r1 = newtmp("abi", Kl, fn); + emit(Ostorel, Kw, R, r1, r0); + emit(Oadd, Kl, r1, TMP(RBP), getcon(-176, fn)); + emit(Oadd, Kl, r0, ap, getcon(16, fn)); + r0 = newtmp("abi", Kl, fn); + r1 = newtmp("abi", Kl, fn); + emit(Ostorel, Kw, R, r1, r0); + emit(Oadd, Kl, r1, TMP(RBP), getcon(sp, fn)); + emit(Oadd, Kl, r0, ap, getcon(8, fn)); + r0 = newtmp("abi", Kl, fn); + emit(Ostorew, Kw, R, getcon(fp, fn), r0); + emit(Oadd, Kl, r0, ap, getcon(4, fn)); + emit(Ostorew, Kw, R, getcon(gp, fn), ap); +} + +void +amd64_sysv_abi(Fn *fn) +{ + Blk *b; + Ins *i, *i0; + RAlloc *ral; + int n0, n1, ioff, fa; + + for (b=fn->start; b; b=b->link) + b->visit = 0; + + /* lower parameters */ + for (b=fn->start, i=b->ins; i<&b->ins[b->nins]; i++) + if (!ispar(i->op)) + break; + fa = selpar(fn, b->ins, i); + n0 = &insb[NIns] - curi; + ioff = i - b->ins; + n1 = b->nins - ioff; + vgrow(&b->ins, n0+n1); + icpy(b->ins+n0, b->ins+ioff, n1); + icpy(b->ins, curi, n0); + b->nins = n0+n1; + + /* lower calls, returns, and vararg instructions */ + ral = 0; + b = fn->start; + do { + if (!(b = b->link)) + b = fn->start; /* do it last */ + if (b->visit) + continue; + curi = &insb[NIns]; + selret(b, fn); + for (i=&b->ins[b->nins]; i!=b->ins;) + switch ((--i)->op) { + default: + emiti(*i); + break; + case Ocall: + for (i0=i; i0>b->ins; i0--) + if (!isarg((i0-1)->op)) + break; + selcall(fn, i0, i, &ral); + i = i0; + break; + case Ovastart: + selvastart(fn, fa, i->arg[0]); + break; + case Ovaarg: + selvaarg(fn, b, i); + break; + case Oarg: + case Oargc: + die("unreachable"); + } + if (b == fn->start) + for (; ral; ral=ral->link) + emiti(ral->i); + idup(b, curi, &insb[NIns]-curi); + } while (b != fn->start); + + if (debug['A']) { + fprintf(stderr, "\n> After ABI lowering:\n"); + printfn(fn, stderr); 
+ } +} diff --git a/src/qbe/amd64/targ.c b/src/qbe/amd64/targ.c new file mode 100644 index 00000000..3edaf8b4 --- /dev/null +++ b/src/qbe/amd64/targ.c @@ -0,0 +1,67 @@ +#include "all.h" + +Amd64Op amd64_op[NOp] = { +#define O(op, t, x) [O##op] = +#define X(nm, zf, lf) { nm, zf, lf, }, + #include "../ops.h" +}; + +static int +amd64_memargs(int op) +{ + return amd64_op[op].nmem; +} + +#define AMD64_COMMON \ + .gpr0 = RAX, \ + .ngpr = NGPR, \ + .fpr0 = XMM0, \ + .nfpr = NFPR, \ + .rglob = BIT(RBP) | BIT(RSP), \ + .nrglob = 2, \ + .memargs = amd64_memargs, \ + .abi0 = elimsb, \ + .isel = amd64_isel, \ + .cansel = 1, + +Target T_amd64_sysv = { + .name = "amd64_sysv", + .emitfin = elf_emitfin, + .asloc = ".L", + .abi1 = amd64_sysv_abi, + .rsave = amd64_sysv_rsave, + .nrsave = {NGPS_SYSV, NFPS}, + .retregs = amd64_sysv_retregs, + .argregs = amd64_sysv_argregs, + .emitfn = amd64_sysv_emitfn, + AMD64_COMMON +}; + +Target T_amd64_apple = { + .name = "amd64_apple", + .apple = 1, + .emitfin = macho_emitfin, + .asloc = "L", + .assym = "_", + .abi1 = amd64_sysv_abi, + .rsave = amd64_sysv_rsave, + .nrsave = {NGPS_SYSV, NFPS}, + .retregs = amd64_sysv_retregs, + .argregs = amd64_sysv_argregs, + .emitfn = amd64_sysv_emitfn, + AMD64_COMMON +}; + +Target T_amd64_win = { + .name = "amd64_win", + .windows = 1, + .emitfin = pe_emitfin, + .asloc = "L", + .abi1 = amd64_winabi_abi, + .rsave = amd64_winabi_rsave, + .nrsave = {NGPS_WIN, NFPS}, + .retregs = amd64_winabi_retregs, + .argregs = amd64_winabi_argregs, + .emitfn = amd64_winabi_emitfn, + AMD64_COMMON +}; diff --git a/src/qbe/amd64/winabi.c b/src/qbe/amd64/winabi.c new file mode 100755 index 00000000..5136e620 --- /dev/null +++ b/src/qbe/amd64/winabi.c @@ -0,0 +1,763 @@ +#include "all.h" + +#include + +typedef enum ArgPassStyle { + APS_Invalid = 0, + APS_Register, + APS_InlineOnStack, + APS_CopyAndPointerInRegister, + APS_CopyAndPointerOnStack, + APS_VarargsTag, + APS_EnvTag, +} ArgPassStyle; + +typedef struct ArgClass { + Typ* type; 
+ ArgPassStyle style; + int align; + uint size; + int cls; + Ref ref; +} ArgClass; + +typedef struct ExtraAlloc ExtraAlloc; +struct ExtraAlloc { + Ins instr; + ExtraAlloc* link; +}; + +#define ALIGN_DOWN(n, a) ((n) & ~((a)-1)) +#define ALIGN_UP(n, a) ALIGN_DOWN((n) + (a)-1, (a)) + +// Number of stack bytes required be reserved for the callee. +#define SHADOW_SPACE_SIZE 32 + +int amd64_winabi_rsave[] = {RCX, RDX, R8, R9, R10, R11, RAX, XMM0, + XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7, XMM8, + XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, -1}; +int amd64_winabi_rclob[] = {RBX, R12, R13, R14, R15, RSI, RDI, -1}; + +MAKESURE(winabi_arrays_ok, + sizeof amd64_winabi_rsave == (NGPS_WIN + NFPS + 1) * sizeof(int) && + sizeof amd64_winabi_rclob == (NCLR_WIN + 1) * sizeof(int)); + +// layout of call's second argument (RCall) +// +// bit 0: rax returned +// bit 1: xmm0 returned +// bits 23: 0 +// bits 4567: rcx, rdx, r8, r9 passed +// bits 89ab: xmm0,1,2,3 passed +// bit c: env call (rax passed) +// bits d..1f: 0 + +bits amd64_winabi_retregs(Ref r, int p[2]) { + assert(rtype(r) == RCall); + + bits b = 0; + int num_int_returns = r.val & 1; + int num_float_returns = r.val & 2; + if (num_int_returns == 1) { + b |= BIT(RAX); + } else { + b |= BIT(XMM0); + } + if (p) { + p[0] = num_int_returns; + p[1] = num_float_returns; + } + return b; +} + +static uint popcnt(bits b) { + b = (b & 0x5555555555555555) + ((b >> 1) & 0x5555555555555555); + b = (b & 0x3333333333333333) + ((b >> 2) & 0x3333333333333333); + b = (b & 0x0f0f0f0f0f0f0f0f) + ((b >> 4) & 0x0f0f0f0f0f0f0f0f); + b += (b >> 8); + b += (b >> 16); + b += (b >> 32); + return b & 0xff; +} + +bits amd64_winabi_argregs(Ref r, int p[2]) { + assert(rtype(r) == RCall); + + // On SysV, these are counts. Here, a count isn't sufficient, we actually need + // to know which ones are in use because they're not necessarily contiguous. 
+ int int_passed = (r.val >> 4) & 15; + int float_passed = (r.val >> 8) & 15; + bool env_param = (r.val >> 12) & 1; + + bits b = 0; + b |= (int_passed & 1) ? BIT(RCX) : 0; + b |= (int_passed & 2) ? BIT(RDX) : 0; + b |= (int_passed & 4) ? BIT(R8) : 0; + b |= (int_passed & 8) ? BIT(R9) : 0; + b |= (float_passed & 1) ? BIT(XMM0) : 0; + b |= (float_passed & 2) ? BIT(XMM1) : 0; + b |= (float_passed & 4) ? BIT(XMM2) : 0; + b |= (float_passed & 8) ? BIT(XMM3) : 0; + b |= env_param ? BIT(RAX) : 0; + if (p) { + // TODO: The only place this is used is live.c. I'm not sure what should be + // returned here wrt to using the same counter for int/float regs on win. + // For now, try the number of registers in use even though they're not + // contiguous. + p[0] = popcnt(int_passed); + p[1] = popcnt(float_passed); + } + return b; +} + +typedef struct RegisterUsage { + // Counter for both int/float as they're counted together. Only if the bool's + // set in regs_passed is the given register *actually* needed for a value + // (i.e. needs to be saved, etc.). + int num_regs_passed; + + // Indexed first by 0=int, 1=float, use KBASE(cls). + // Indexed second by register index in calling convention, so for integer, + // 0=RCX, 1=RDX, 2=R8, 3=R9, and for float XMM0, XMM1, XMM2, XMM3. + bool regs_passed[2][4]; + + bool rax_returned; + bool xmm0_returned; + + // This is also used as where the va_start will start for varargs functions + // (there's no 'Oparv', so we need to keep track of a count here.) + int num_named_args_passed; + + // This is set when classifying the arguments for a call (but not when + // classifying the parameters of a function definition). 
+ bool is_varargs_call; + + bool has_env; +} RegisterUsage; + +static int register_usage_to_call_arg_value(RegisterUsage reg_usage) { + return (reg_usage.rax_returned << 0) | // + (reg_usage.xmm0_returned << 1) | // + (reg_usage.regs_passed[0][0] << 4) | // + (reg_usage.regs_passed[0][1] << 5) | // + (reg_usage.regs_passed[0][2] << 6) | // + (reg_usage.regs_passed[0][3] << 7) | // + (reg_usage.regs_passed[1][0] << 8) | // + (reg_usage.regs_passed[1][1] << 9) | // + (reg_usage.regs_passed[1][2] << 10) | // + (reg_usage.regs_passed[1][3] << 11) | // + (reg_usage.has_env << 12); +} + +// Assigns the argument to a register if there's any left according to the +// calling convention, and updates the regs_passed bools. Otherwise marks the +// value as needing stack space to be passed. +static void assign_register_or_stack(RegisterUsage* reg_usage, + ArgClass* arg, + bool is_float, + bool by_copy) { + if (reg_usage->num_regs_passed == 4) { + arg->style = by_copy ? APS_CopyAndPointerOnStack : APS_InlineOnStack; + } else { + reg_usage->regs_passed[is_float][reg_usage->num_regs_passed] = true; + ++reg_usage->num_regs_passed; + arg->style = by_copy ? APS_CopyAndPointerInRegister : APS_Register; + } + ++reg_usage->num_named_args_passed; +} + +static bool type_is_by_copy(Typ* type) { + // Note that only these sizes are passed by register, even though e.g. a + // 5 byte struct would "fit", it still is passed by copy-and-pointer. + return type->isdark || (type->size != 1 && type->size != 2 && + type->size != 4 && type->size != 8); +} + +// This function is used for both arguments and parameters. +// begin_instr should either point at the first Oarg or Opar, and end_instr +// should point past the last one (so to the Ocall for arguments, or to the +// first 'real' instruction of the function for parameters). 
+static void classify_arguments(RegisterUsage* reg_usage, + Ins* begin_instr, + Ins* end_instr, + ArgClass* arg_classes, + Ref* env) { + ArgClass* arg = arg_classes; + // For each argument, determine how it will be passed (int, float, stack) + // and update the `reg_usage` counts. Additionally, fill out arg_classes for + // each argument. + for (Ins* instr = begin_instr; instr < end_instr; ++instr, ++arg) { + switch (instr->op) { + case Oarg: + case Opar: + assign_register_or_stack(reg_usage, arg, KBASE(instr->cls), + /*by_copy=*/false); + arg->cls = instr->cls; + arg->align = 3; + arg->size = 8; + break; + case Oargc: + case Oparc: { + int typ_index = instr->arg[0].val; + Typ* type = &typ[typ_index]; + bool by_copy = type_is_by_copy(type); + assign_register_or_stack(reg_usage, arg, /*is_float=*/false, by_copy); + arg->cls = Kl; + if (!by_copy && type->size <= 4) { + arg->cls = Kw; + } + arg->align = 3; + arg->size = type->size; + break; + } + case Oarge: + *env = instr->arg[0]; + arg->style = APS_EnvTag; + reg_usage->has_env = true; + break; + case Opare: + *env = instr->to; + arg->style = APS_EnvTag; + reg_usage->has_env = true; + break; + case Oargv: + reg_usage->is_varargs_call = true; + arg->style = APS_VarargsTag; + break; + } + } + + if (reg_usage->has_env && reg_usage->is_varargs_call) { + die("can't use env with varargs"); + } + + // During a varargs call, float arguments have to be duplicated to their + // associated integer register, so mark them as in-use too. 
+ if (reg_usage->is_varargs_call) { + for (int i = 0; i < 4; ++i) { + if (reg_usage->regs_passed[/*float*/ 1][i]) { + reg_usage->regs_passed[/*int*/ 0][i] = true; + } + } + } +} + +static bool is_integer_type(int ty) { + assert(ty >= 0 && ty < 4 && "expecting Kw Kl Ks Kd"); + return KBASE(ty) == 0; +} + +static Ref register_for_arg(int cls, int counter) { + assert(counter < 4); + if (is_integer_type(cls)) { + return TMP(amd64_winabi_rsave[counter]); + } else { + return TMP(XMM0 + counter); + } +} + +static Ins* lower_call(Fn* func, + Blk* block, + Ins* call_instr, + ExtraAlloc** pextra_alloc) { + // Call arguments are instructions. Walk through them to find the end of the + // call+args that we need to process (and return the instruction past the body + // of the instruction for continuing processing). + Ins* instr_past_args = call_instr - 1; + for (; instr_past_args >= block->ins; --instr_past_args) { + if (!isarg(instr_past_args->op)) { + break; + } + } + Ins* earliest_arg_instr = instr_past_args + 1; + + // Don't need an ArgClass for the call itself, so one less than the total + // number of instructions we're dealing with. + uint num_args = call_instr - earliest_arg_instr; + ArgClass* arg_classes = alloc(num_args * sizeof(ArgClass)); + + RegisterUsage reg_usage = {0}; + ArgClass ret_arg_class = {0}; + + // Ocall's two arguments are the the function to be called in 0, and, if the + // the function returns a non-basic type, then arg[1] is a reference to the + // type of the return. req checks if Refs are equal; `R` is 0. 
+ bool il_has_struct_return = !req(call_instr->arg[1], R); + bool is_struct_return = false; + if (il_has_struct_return) { + Typ* ret_type = &typ[call_instr->arg[1].val]; + is_struct_return = type_is_by_copy(ret_type); + if (is_struct_return) { + assign_register_or_stack(®_usage, &ret_arg_class, /*is_float=*/false, + /*by_copy=*/true); + } + ret_arg_class.size = ret_type->size; + } + Ref env = R; + classify_arguments(®_usage, earliest_arg_instr, call_instr, arg_classes, + &env); + + // We now know which arguments are on the stack and which are in registers, so + // we can allocate the correct amount of space to stash the stack-located ones + // into. + uint stack_usage = 0; + for (uint i = 0; i < num_args; ++i) { + ArgClass* arg = &arg_classes[i]; + // stack_usage only accounts for pushes that are for values that don't have + // enough registers. Large struct copies are alloca'd separately, and then + // only have (potentially) 8 bytes to add to stack_usage here. + if (arg->style == APS_InlineOnStack) { + if (arg->align > 4) { + err("win abi cannot pass alignments > 16"); + } + stack_usage += arg->size; + } else if (arg->style == APS_CopyAndPointerOnStack) { + stack_usage += 8; + } + } + stack_usage = ALIGN_UP(stack_usage, 16); + + // Note that here we're logically 'after' the call (due to emitting + // instructions in reverse order), so we're doing a negative stack + // allocation to clean up after the call. 
+ Ref stack_size_ref = + getcon(-(int64_t)(stack_usage + SHADOW_SPACE_SIZE), func); + emit(Osalloc, Kl, R, stack_size_ref, R); + + ExtraAlloc* return_pad = NULL; + if (is_struct_return) { + return_pad = alloc(sizeof(ExtraAlloc)); + Ref ret_pad_ref = newtmp("abi.ret_pad", Kl, func); + return_pad->instr = + (Ins){Oalloc8, Kl, ret_pad_ref, {getcon(ret_arg_class.size, func)}}; + return_pad->link = (*pextra_alloc); + *pextra_alloc = return_pad; + reg_usage.rax_returned = true; + emit(Ocopy, call_instr->cls, call_instr->to, TMP(RAX), R); + } else { + if (il_has_struct_return) { + // In the case that at the IL level, a struct return was specified, but as + // far as the calling convention is concerned it's not actually by + // pointer, we need to store the return value into an alloca because + // subsequent IL will still be treating the function return as a pointer. + ExtraAlloc* return_copy = alloc(sizeof(ExtraAlloc)); + return_copy->instr = + (Ins){Oalloc8, Kl, call_instr->to, {getcon(8, func)}}; + return_copy->link = (*pextra_alloc); + *pextra_alloc = return_copy; + Ref copy = newtmp("abi.copy", Kl, func); + emit(Ostorel, 0, R, copy, call_instr->to); + emit(Ocopy, Kl, copy, TMP(RAX), R); + reg_usage.rax_returned = true; + } else if (is_integer_type(call_instr->cls)) { + // Only a basic type returned from the call, integer. + emit(Ocopy, call_instr->cls, call_instr->to, TMP(RAX), R); + reg_usage.rax_returned = true; + } else { + // Basic type, floating point. + emit(Ocopy, call_instr->cls, call_instr->to, TMP(XMM0), R); + reg_usage.xmm0_returned = true; + } + } + + // Emit the actual call instruction. There's no 'to' value by this point + // because we've lowered it into register manipulation (that's the `R`), + // arg[0] of the call is the function, and arg[1] is register usage is + // documented as above (copied from SysV). 
+ emit(Ocall, call_instr->cls, R, call_instr->arg[0], + CALL(register_usage_to_call_arg_value(reg_usage))); + + if (!req(R, env)) { + // If there's an env arg to be passed, it gets stashed in RAX. + emit(Ocopy, Kl, TMP(RAX), env, R); + } + + if (reg_usage.is_varargs_call) { + // Any float arguments need to be duplicated to integer registers. This is + // required by the calling convention so that dumping to shadow space can be + // done without a prototype and for varargs. +#define DUP_IF_USED(index, floatreg, intreg) \ + if (reg_usage.regs_passed[/*float*/ 1][index]) { \ + emit(Ocast, Kl, TMP(intreg), TMP(floatreg), R); \ + } + DUP_IF_USED(0, XMM0, RCX); + DUP_IF_USED(1, XMM1, RDX); + DUP_IF_USED(2, XMM2, R8); + DUP_IF_USED(3, XMM3, R9); +#undef DUP_IF_USED + } + + int reg_counter = 0; + if (is_struct_return) { + Ref first_reg = register_for_arg(Kl, reg_counter++); + emit(Ocopy, Kl, first_reg, return_pad->instr.to, R); + } + + // This is where we actually do the load of values into registers or into + // stack slots. + Ref arg_stack_slots = newtmp("abi.args", Kl, func); + uint slot_offset = SHADOW_SPACE_SIZE; + ArgClass* arg = arg_classes; + for (Ins* instr = earliest_arg_instr; instr != call_instr; ++instr, ++arg) { + switch (arg->style) { + case APS_Register: { + Ref into = register_for_arg(arg->cls, reg_counter++); + if (instr->op == Oargc) { + // If this is a small struct being passed by value. The value in the + // instruction in this case is a pointer, but it needs to be loaded + // into the register. + emit(Oload, arg->cls, into, instr->arg[1], R); + } else { + // Otherwise, a normal value passed in a register. + emit(Ocopy, instr->cls, into, instr->arg[0], R); + } + break; + } + case APS_InlineOnStack: { + Ref slot = newtmp("abi.off", Kl, func); + if (instr->op == Oargc) { + // This is a small struct, so it's not passed by copy, but the + // instruction is a pointer. So we need to copy it into the stack + // slot. 
(And, remember that these are emitted backwards, so store, + // then load.) + Ref smalltmp = newtmp("abi.smalltmp", arg->cls, func); + emit(Ostorel, 0, R, smalltmp, slot); + emit(Oload, arg->cls, smalltmp, instr->arg[1], R); + } else { + // Stash the value into the stack slot. + emit(Ostorel, 0, R, instr->arg[0], slot); + } + emit(Oadd, Kl, slot, arg_stack_slots, getcon(slot_offset, func)); + slot_offset += arg->size; + break; + } + case APS_CopyAndPointerInRegister: + case APS_CopyAndPointerOnStack: { + // Alloca a space to copy into, and blit the value from the instr to the + // copied location. + ExtraAlloc* arg_copy = alloc(sizeof(ExtraAlloc)); + Ref copy_ref = newtmp("abi.copy", Kl, func); + arg_copy->instr = + (Ins){Oalloc8, Kl, copy_ref, {getcon(arg->size, func)}}; + arg_copy->link = (*pextra_alloc); + *pextra_alloc = arg_copy; + emit(Oblit1, 0, R, INT(arg->size), R); + emit(Oblit0, 0, R, instr->arg[1], copy_ref); + + // Now load the pointer into the correct register or stack slot. + if (arg->style == APS_CopyAndPointerInRegister) { + Ref into = register_for_arg(arg->cls, reg_counter++); + emit(Ocopy, Kl, into, copy_ref, R); + } else { + assert(arg->style == APS_CopyAndPointerOnStack); + Ref slot = newtmp("abi.off", Kl, func); + emit(Ostorel, 0, R, copy_ref, slot); + emit(Oadd, Kl, slot, arg_stack_slots, getcon(slot_offset, func)); + slot_offset += 8; + } + break; + } + case APS_EnvTag: + case APS_VarargsTag: + // Nothing to do here, see right before the call for reg dupe. + break; + case APS_Invalid: + die("unreachable"); + } + } + + if (stack_usage) { + // The last (first in call order) thing we do is allocate the the stack + // space we're going to fill with temporaries. 
+ emit(Osalloc, Kl, arg_stack_slots, + getcon(stack_usage + SHADOW_SPACE_SIZE, func), R); + } else { + // When there's no usage for temporaries, we can add this into the other + // alloca, but otherwise emit it separately (not storing into a reference) + // so that it doesn't get removed later for being useless. + emit(Osalloc, Kl, R, getcon(SHADOW_SPACE_SIZE, func), R); + } + + return instr_past_args; +} + +static void lower_block_return(Fn* func, Blk* block) { + int jmp_type = block->jmp.type; + + if (!isret(jmp_type) || jmp_type == Jret0) { + return; + } + + // Save the argument, and set the block to be a void return because once it's + // lowered it's handled by the the register/stack manipulation. + Ref ret_arg = block->jmp.arg; + block->jmp.type = Jret0; + + RegisterUsage reg_usage = {0}; + + if (jmp_type == Jretc) { + Typ* type = &typ[func->retty]; + if (type_is_by_copy(type)) { + assert(rtype(func->retr) == RTmp); + emit(Ocopy, Kl, TMP(RAX), func->retr, R); + emit(Oblit1, 0, R, INT(type->size), R); + emit(Oblit0, 0, R, ret_arg, func->retr); + } else { + emit(Oload, Kl, TMP(RAX), ret_arg, R); + } + reg_usage.rax_returned = true; + } else { + int k = jmp_type - Jretw; + if (is_integer_type(k)) { + emit(Ocopy, k, TMP(RAX), ret_arg, R); + reg_usage.rax_returned = true; + } else { + emit(Ocopy, k, TMP(XMM0), ret_arg, R); + reg_usage.xmm0_returned = true; + } + } + block->jmp.arg = CALL(register_usage_to_call_arg_value(reg_usage)); +} + +static void lower_vastart(Fn* func, + RegisterUsage* param_reg_usage, + Ref valist) { + assert(func->vararg); + // In varargs functions: + // 1. the int registers are already dumped to the shadow stack space; + // 2. any parameters passed in floating point registers have + // been duplicated to the integer registers + // 3. we ensure (later) that for varargs functions we're always using an rbp + // frame pointer. + // So, the ... argument is just indexed past rbp by the number of named values + // that were actually passed. 
+ + Ref offset = newtmp("abi.vastart", Kl, func); + emit(Ostorel, 0, R, offset, valist); + + // *8 for sizeof(u64), +16 because the return address and rbp have been pushed + // by the time we get to the body of the function. + emit(Oadd, Kl, offset, TMP(RBP), + getcon(param_reg_usage->num_named_args_passed * 8 + 16, func)); +} + +static void lower_vaarg(Fn* func, Ins* vaarg_instr) { + // va_list is just a void** on winx64, so load the pointer, then load the + // argument from that pointer, then increment the pointer to the next arg. + // (All emitted backwards as usual.) + Ref inc = newtmp("abi.vaarg.inc", Kl, func); + Ref ptr = newtmp("abi.vaarg.ptr", Kl, func); + emit(Ostorel, 0, R, inc, vaarg_instr->arg[0]); + emit(Oadd, Kl, inc, ptr, getcon(8, func)); + emit(Oload, vaarg_instr->cls, vaarg_instr->to, ptr, R); + emit(Oload, Kl, ptr, vaarg_instr->arg[0], R); +} + +static void lower_args_for_block(Fn* func, + Blk* block, + RegisterUsage* param_reg_usage, + ExtraAlloc** pextra_alloc) { + // global temporary buffer used by emit. Reset to the end, and predecremented + // when adding to it. + curi = &insb[NIns]; + + lower_block_return(func, block); + + if (block->nins) { + // Work backwards through the instructions, either copying them unchanged, + // or modifying as necessary. + for (Ins* instr = &block->ins[block->nins - 1]; instr >= block->ins;) { + switch (instr->op) { + case Ocall: + instr = lower_call(func, block, instr, pextra_alloc); + break; + case Ovastart: + lower_vastart(func, param_reg_usage, instr->arg[0]); + --instr; + break; + case Ovaarg: + lower_vaarg(func, instr); + --instr; + break; + case Oarg: + case Oargc: + die("unreachable"); + default: + emiti(*instr); + --instr; + break; + } + } + } + + // This it the start block, which is processed last. Add any allocas that + // other blocks needed. 
+ bool is_start_block = block == func->start; + if (is_start_block) { + for (ExtraAlloc* ea = *pextra_alloc; ea; ea = ea->link) { + emiti(ea->instr); + } + } + + // emit/emiti add instructions from the end to the beginning of the temporary + // global buffer. dup the final version into the final block storage. + block->nins = &insb[NIns] - curi; + idup(block, curi, block->nins); +} + +static Ins* find_end_of_func_parameters(Blk* start_block) { + Ins* i; + for (i = start_block->ins; i < &start_block->ins[start_block->nins]; ++i) { + if (!ispar(i->op)) { + break; + } + } + return i; +} + +// Copy from registers/stack into values. +static RegisterUsage lower_func_parameters(Fn* func) { + // This is half-open, so end points after the last Opar. + Blk* start_block = func->start; + Ins* start_of_params = start_block->ins; + Ins* end_of_params = find_end_of_func_parameters(start_block); + + size_t num_params = end_of_params - start_of_params; + ArgClass* arg_classes = alloc(num_params * sizeof(ArgClass)); + ArgClass arg_ret = {0}; + + // global temporary buffer used by emit. Reset to the end, and predecremented + // when adding to it. + curi = &insb[NIns]; + + int reg_counter = 0; + RegisterUsage reg_usage = {0}; + if (func->retty >= 0) { + bool by_copy = type_is_by_copy(&typ[func->retty]); + if (by_copy) { + assign_register_or_stack(®_usage, &arg_ret, /*is_float=*/false, + by_copy); + Ref ret_ref = newtmp("abi.ret", Kl, func); + emit(Ocopy, Kl, ret_ref, TMP(RCX), R); + func->retr = ret_ref; + ++reg_counter; + } + } + Ref env = R; + classify_arguments(®_usage, start_of_params, end_of_params, arg_classes, + &env); + func->reg = amd64_winabi_argregs( + CALL(register_usage_to_call_arg_value(reg_usage)), NULL); + + // Copy from the registers or stack slots into the named parameters. Depending + // on how they're passed, they either need to be copied or loaded. 
+ ArgClass* arg = arg_classes; + uint slot_offset = SHADOW_SPACE_SIZE / 4 + 4; + for (Ins* instr = start_of_params; instr < end_of_params; ++instr, ++arg) { + switch (arg->style) { + case APS_Register: { + Ref from = register_for_arg(arg->cls, reg_counter++); + // If it's a struct at the IL level, we need to copy the register into + // an alloca so we have something to point at (same for InlineOnStack). + if (instr->op == Oparc) { + arg->ref = newtmp("abi", Kl, func); + emit(Ostorel, 0, R, arg->ref, instr->to); + emit(Ocopy, instr->cls, arg->ref, from, R); + emit(Oalloc8, Kl, instr->to, getcon(arg->size, func), R); + } else { + emit(Ocopy, instr->cls, instr->to, from, R); + } + break; + } + case APS_InlineOnStack: + if (instr->op == Oparc) { + arg->ref = newtmp("abi", Kl, func); + emit(Ostorel, 0, R, arg->ref, instr->to); + emit(Ocopy, instr->cls, arg->ref, SLOT(-slot_offset), R); + emit(Oalloc8, Kl, instr->to, getcon(arg->size, func), R); + } else { + emit(Ocopy, Kl, instr->to, SLOT(-slot_offset), R); + } + slot_offset += 2; + break; + case APS_CopyAndPointerOnStack: + emit(Oload, Kl, instr->to, SLOT(-slot_offset), R); + slot_offset += 2; + break; + case APS_CopyAndPointerInRegister: { + // Because this has to be a copy (that we own), it is sufficient to just + // copy the register to the target. + Ref from = register_for_arg(Kl, reg_counter++); + emit(Ocopy, Kl, instr->to, from, R); + break; + } + case APS_EnvTag: + break; + case APS_VarargsTag: + case APS_Invalid: + die("unreachable"); + } + } + + // If there was an `env`, it was passed in RAX, so copy it into the env ref. 
+ if (!req(R, env)) { + emit(Ocopy, Kl, env, TMP(RAX), R); + } + + int num_created_instrs = &insb[NIns] - curi; + int num_other_after_instrs = (int)(start_block->nins - num_params); + int new_total_instrs = num_other_after_instrs + num_created_instrs; + Ins* new_instrs = vnew(new_total_instrs, sizeof(Ins), PFn); + Ins* instr_p = icpy(new_instrs, curi, num_created_instrs); + icpy(instr_p, end_of_params, num_other_after_instrs); + start_block->nins = new_total_instrs; + start_block->ins = new_instrs; + + return reg_usage; +} + +// The main job of this function is to lower generic instructions into the +// specific details of how arguments are passed, and parameters are +// interpreted for win x64. A useful reference is +// https://learn.microsoft.com/en-us/cpp/build/x64-calling-convention . +// +// Some of the major differences from SysV if you're comparing the code +// (non-exhaustive): +// - only 4 int and 4 float regs are used +// - when an int register is assigned a value, its associated float register is +// left unused (and vice versa). i.e. there's only one counter as you assign +// arguments to registers. +// - any structs that aren't 1/2/4/8 bytes in size are passed by pointer, not +// by copying them into the stack. So e.g. if you pass something like +// `struct { void*, int64_t }` by value, it first needs to be copied to +// another alloca (in order to maintain value semantics at the language +// level), then the pointer to that copy is treated as a regular integer +// argument (which then itself may *also* be copied to the stack in the case +// there's no integer register remaining.) +// - when calling a varargs functions, floating point values must be duplicated +// integer registers. Along with the above restrictions, this makes varargs +// handling simpler for the callee than SysV. +void amd64_winabi_abi(Fn* func) { + // The first thing to do is lower incoming parameters to this function. 
+ RegisterUsage param_reg_usage = lower_func_parameters(func); + + // This is the second larger part of the job. We walk all blocks, and rewrite + // instructions returns, calls, and handling of varargs into their win x64 + // specific versions. Any other instructions are just passed through unchanged + // by using `emiti`. + + // Skip over the entry block, and do it at the end so that our later + // modifications can add allocations to the start block. In particular, we + // need to add stack allocas for copies when structs are passed or returned by + // value. + ExtraAlloc* extra_alloc = NULL; + for (Blk* block = func->start->link; block; block = block->link) { + lower_args_for_block(func, block, ¶m_reg_usage, &extra_alloc); + } + lower_args_for_block(func, func->start, ¶m_reg_usage, &extra_alloc); + + if (debug['A']) { + fprintf(stderr, "\n> After ABI lowering:\n"); + printfn(func, stderr); + } +} diff --git a/src/qbe/arm64/abi.c b/src/qbe/arm64/abi.c new file mode 100644 index 00000000..2bafec79 --- /dev/null +++ b/src/qbe/arm64/abi.c @@ -0,0 +1,852 @@ +#include "all.h" + +typedef struct Abi Abi; +typedef struct Class Class; +typedef struct Insl Insl; +typedef struct Params Params; + +enum { + Cstk = 1, /* pass on the stack */ + Cptr = 2, /* replaced by a pointer */ +}; + +struct Class { + char class; + char ishfa; + struct { + char base; + uchar size; + } hfa; + uint size; + uint align; + Typ *t; + uchar nreg; + uchar ngp; + uchar nfp; + int reg[4]; + int cls[4]; +}; + +struct Insl { + Ins i; + Insl *link; +}; + +struct Params { + uint ngp; + uint nfp; + uint stk; +}; + +static int gpreg[12] = {R0, R1, R2, R3, R4, R5, R6, R7}; +static int fpreg[12] = {V0, V1, V2, V3, V4, V5, V6, V7}; +static int store[] = { + [Kw] = Ostorew, [Kl] = Ostorel, + [Ks] = Ostores, [Kd] = Ostored +}; + +/* layout of call's second argument (RCall) + * + * 13 + * 29 14 | 9 5 2 0 + * |0.00|x|x|xxxx|xxxx|xxx|xx| range + * | | | | | ` gp regs returned (0..2) + * | | | | ` fp regs returned 
(0..4) + * | | | ` gp regs passed (0..8) + * | | ` fp regs passed (0..8) + * | ` indirect result register x8 used (0..1) + * ` env pointer passed in x9 (0..1) + */ + +static int +isfloatv(Typ *t, char *cls) +{ + Field *f; + uint n; + + for (n=0; nnunion; n++) + for (f=t->fields[n]; f->type != FEnd; f++) + switch (f->type) { + case Fs: + if (*cls == Kd) + return 0; + *cls = Ks; + break; + case Fd: + if (*cls == Ks) + return 0; + *cls = Kd; + break; + case FTyp: + if (isfloatv(&typ[f->len], cls)) + break; + /* fall through */ + default: + return 0; + } + return 1; +} + +static void +typclass(Class *c, Typ *t, int *gp, int *fp) +{ + uint64_t sz, hfasz; + uint n; + + sz = (t->size + 7) & -8; + c->t = t; + c->class = 0; + c->ngp = 0; + c->nfp = 0; + c->align = 8; + + if (t->align > 3) + err("alignments larger than 8 are not supported"); + + c->size = sz; + c->hfa.base = Kx; + c->ishfa = isfloatv(t, &c->hfa.base); + hfasz = t->size/(KWIDE(c->hfa.base) ? 8 : 4); + c->ishfa &= !t->isdark && hfasz <= 4; + c->hfa.size = hfasz; + + if (c->ishfa) { + for (n=0; nnfp++) { + c->reg[n] = *fp++; + c->cls[n] = c->hfa.base; + } + c->nreg = n; + } + else if (t->isdark || sz > 16 || sz == 0) { + /* large structs are replaced by a + * pointer to some caller-allocated + * memory */ + c->class |= Cptr; + c->size = 8; + c->ngp = 1; + *c->reg = *gp; + *c->cls = Kl; + } + else { + for (n=0; nngp++) { + c->reg[n] = *gp++; + c->cls[n] = Kl; + } + c->nreg = n; + } +} + +static void +sttmps(Ref tmp[], int cls[], uint nreg, Ref mem, Fn *fn) +{ + uint n; + uint64_t off; + Ref r; + + assert(nreg <= 4); + off = 0; + for (n=0; njmp.type; + + if (!isret(j) || j == Jret0) + return; + + r = b->jmp.arg; + b->jmp.type = Jret0; + + if (j == Jretc) { + typclass(&cr, &typ[fn->retty], gpreg, fpreg); + if (cr.class & Cptr) { + assert(rtype(fn->retr) == RTmp); + emit(Oblit1, 0, R, INT(cr.t->size), R); + emit(Oblit0, 0, R, r, fn->retr); + cty = 0; + } else { + ldregs(cr.reg, cr.cls, cr.nreg, r, fn); + cty = 
(cr.nfp << 2) | cr.ngp; + } + } else { + k = j - Jretw; + if (KBASE(k) == 0) { + emit(Ocopy, k, TMP(R0), r, R); + cty = 1; + } else { + emit(Ocopy, k, TMP(V0), r, R); + cty = 1 << 2; + } + } + + b->jmp.arg = CALL(cty); +} + +static int +argsclass(Ins *i0, Ins *i1, Class *carg) +{ + int va, envc, ngp, nfp, *gp, *fp; + Class *c; + Ins *i; + + va = 0; + envc = 0; + gp = gpreg; + fp = fpreg; + ngp = 8; + nfp = 8; + for (i=i0, c=carg; iop) { + case Oargsb: + case Oargub: + case Oparsb: + case Oparub: + c->size = 1; + goto Scalar; + case Oargsh: + case Oarguh: + case Oparsh: + case Oparuh: + c->size = 2; + goto Scalar; + case Opar: + case Oarg: + c->size = 8; + if (T.apple && !KWIDE(i->cls)) + c->size = 4; + Scalar: + c->align = c->size; + *c->cls = i->cls; + if (va) { + c->class |= Cstk; + c->size = 8; + c->align = 8; + break; + } + if (KBASE(i->cls) == 0 && ngp > 0) { + ngp--; + *c->reg = *gp++; + break; + } + if (KBASE(i->cls) == 1 && nfp > 0) { + nfp--; + *c->reg = *fp++; + break; + } + c->class |= Cstk; + break; + case Oparc: + case Oargc: + typclass(c, &typ[i->arg[0].val], gp, fp); + if (c->ngp <= ngp) { + if (c->nfp <= nfp) { + ngp -= c->ngp; + nfp -= c->nfp; + gp += c->ngp; + fp += c->nfp; + break; + } else + nfp = 0; + } else + ngp = 0; + c->class |= Cstk; + break; + case Opare: + case Oarge: + *c->reg = R9; + *c->cls = Kl; + envc = 1; + break; + case Oargv: + va = T.apple != 0; + break; + default: + die("unreachable"); + } + + return envc << 14 | (gp-gpreg) << 5 | (fp-fpreg) << 9; +} + +bits +arm64_retregs(Ref r, int p[2]) +{ + bits b; + int ngp, nfp; + + assert(rtype(r) == RCall); + ngp = r.val & 3; + nfp = (r.val >> 2) & 7; + if (p) { + p[0] = ngp; + p[1] = nfp; + } + b = 0; + while (ngp--) + b |= BIT(R0+ngp); + while (nfp--) + b |= BIT(V0+nfp); + return b; +} + +bits +arm64_argregs(Ref r, int p[2]) +{ + bits b; + int ngp, nfp, x8, x9; + + assert(rtype(r) == RCall); + ngp = (r.val >> 5) & 15; + nfp = (r.val >> 9) & 15; + x8 = (r.val >> 13) & 1; + x9 = (r.val 
>> 14) & 1; + if (p) { + p[0] = ngp + x8 + x9; + p[1] = nfp; + } + b = 0; + while (ngp--) + b |= BIT(R0+ngp); + while (nfp--) + b |= BIT(V0+nfp); + return b | ((bits)x8 << R8) | ((bits)x9 << R9); +} + +static void +stkblob(Ref r, Class *c, Fn *fn, Insl **ilp) +{ + Insl *il; + int al; + uint64_t sz; + + il = alloc(sizeof *il); + al = c->t->align - 2; /* NAlign == 3 */ + if (al < 0) + al = 0; + sz = c->class & Cptr ? c->t->size : c->size; + il->i = (Ins){Oalloc+al, Kl, r, {getcon(sz, fn)}}; + il->link = *ilp; + *ilp = il; +} + +static uint +align(uint x, uint al) +{ + return (x + al-1) & -al; +} + +static void +selcall(Fn *fn, Ins *i0, Ins *i1, Insl **ilp) +{ + Ins *i; + Class *ca, *c, cr; + int op, cty; + uint n, stk, off;; + Ref r, rstk, tmp[4]; + + ca = alloc((i1-i0) * sizeof ca[0]); + cty = argsclass(i0, i1, ca); + + stk = 0; + for (i=i0, c=ca; iclass & Cptr) { + i->arg[0] = newtmp("abi", Kl, fn); + stkblob(i->arg[0], c, fn, ilp); + i->op = Oarg; + } + if (c->class & Cstk) { + stk = align(stk, c->align); + stk += c->size; + } + } + stk = align(stk, 16); + rstk = getcon(stk, fn); + if (stk) + emit(Oadd, Kl, TMP(SP), TMP(SP), rstk); + + if (!req(i1->arg[1], R)) { + typclass(&cr, &typ[i1->arg[1].val], gpreg, fpreg); + stkblob(i1->to, &cr, fn, ilp); + cty |= (cr.nfp << 2) | cr.ngp; + if (cr.class & Cptr) { + /* spill & rega expect calls to be + * followed by copies from regs, + * so we emit a dummy + */ + cty |= 1 << 13 | 1; + emit(Ocopy, Kw, R, TMP(R0), R); + } else { + sttmps(tmp, cr.cls, cr.nreg, i1->to, fn); + for (n=0; ncls) == 0) { + emit(Ocopy, i1->cls, i1->to, TMP(R0), R); + cty |= 1; + } else { + emit(Ocopy, i1->cls, i1->to, TMP(V0), R); + cty |= 1 << 2; + } + } + + emit(Ocall, 0, R, i1->arg[0], CALL(cty)); + + if (cty & (1 << 13)) + /* struct return argument */ + emit(Ocopy, Kl, TMP(R8), i1->to, R); + + for (i=i0, c=ca; iclass & Cstk) != 0) + continue; + if (i->op == Oarg || i->op == Oarge || isargbh(i->op)) + emit(Ocopy, *c->cls, TMP(*c->reg), i->arg[0], 
R); + if (i->op == Oargc) + ldregs(c->reg, c->cls, c->nreg, i->arg[1], fn); + } + + /* populate the stack */ + off = 0; + for (i=i0, c=ca; iclass & Cstk) == 0) + continue; + off = align(off, c->align); + r = newtmp("abi", Kl, fn); + if (i->op == Oarg || isargbh(i->op)) { + switch (c->size) { + case 1: op = Ostoreb; break; + case 2: op = Ostoreh; break; + case 4: + case 8: op = store[*c->cls]; break; + default: die("unreachable"); + } + emit(op, 0, R, i->arg[0], r); + } else { + assert(i->op == Oargc); + emit(Oblit1, 0, R, INT(c->size), R); + emit(Oblit0, 0, R, i->arg[1], r); + } + emit(Oadd, Kl, r, TMP(SP), getcon(off, fn)); + off += c->size; + } + if (stk) + emit(Osub, Kl, TMP(SP), TMP(SP), rstk); + + for (i=i0, c=ca; iclass & Cptr) { + emit(Oblit1, 0, R, INT(c->t->size), R); + emit(Oblit0, 0, R, i->arg[1], i->arg[0]); + } +} + +static Params +selpar(Fn *fn, Ins *i0, Ins *i1) +{ + Class *ca, *c, cr; + Insl *il; + Ins *i; + int op, n, cty; + uint off; + Ref r, tmp[16], *t; + + ca = alloc((i1-i0) * sizeof ca[0]); + curi = &insb[NIns]; + + cty = argsclass(i0, i1, ca); + fn->reg = arm64_argregs(CALL(cty), 0); + + il = 0; + t = tmp; + for (i=i0, c=ca; iop != Oparc || (c->class & (Cptr|Cstk))) + continue; + sttmps(t, c->cls, c->nreg, i->to, fn); + stkblob(i->to, c, fn, &il); + t += c->nreg; + } + for (; il; il=il->link) + emiti(il->i); + + if (fn->retty >= 0) { + typclass(&cr, &typ[fn->retty], gpreg, fpreg); + if (cr.class & Cptr) { + fn->retr = newtmp("abi", Kl, fn); + emit(Ocopy, Kl, fn->retr, TMP(R8), R); + fn->reg |= BIT(R8); + } + } + + t = tmp; + off = 0; + for (i=i0, c=ca; iop == Oparc && !(c->class & Cptr)) { + if (c->class & Cstk) { + off = align(off, c->align); + fn->tmp[i->to.val].slot = -(off+2); + off += c->size; + } else + for (n=0; nnreg; n++) { + r = TMP(c->reg[n]); + emit(Ocopy, c->cls[n], *t++, r, R); + } + } else if (c->class & Cstk) { + off = align(off, c->align); + if (isparbh(i->op)) + op = Oloadsb + (i->op - Oparsb); + else + op = Oload; + 
emit(op, *c->cls, i->to, SLOT(-(off+2)), R); + off += c->size; + } else { + emit(Ocopy, *c->cls, i->to, TMP(*c->reg), R); + } + + return (Params){ + .stk = align(off, 8), + .ngp = (cty >> 5) & 15, + .nfp = (cty >> 9) & 15 + }; +} + +static Blk * +split(Fn *fn, Blk *b) +{ + Blk *bn; + + ++fn->nblk; + bn = newblk(); + idup(bn, curi, &insb[NIns]-curi); + curi = &insb[NIns]; + bn->visit = ++b->visit; + strf(bn->name, "%s.%d", b->name, b->visit); + bn->loop = b->loop; + bn->link = b->link; + b->link = bn; + return bn; +} + +static void +chpred(Blk *b, Blk *bp, Blk *bp1) +{ + Phi *p; + uint a; + + for (p=b->phi; p; p=p->link) { + for (a=0; p->blk[a]!=bp; a++) + assert(a+1narg); + p->blk[a] = bp1; + } +} + +static void +apple_selvaarg(Fn *fn, Blk *b, Ins *i) +{ + Ref ap, stk, stk8, c8; + + (void)b; + c8 = getcon(8, fn); + ap = i->arg[0]; + stk8 = newtmp("abi", Kl, fn); + stk = newtmp("abi", Kl, fn); + + emit(Ostorel, 0, R, stk8, ap); + emit(Oadd, Kl, stk8, stk, c8); + emit(Oload, i->cls, i->to, stk, R); + emit(Oload, Kl, stk, ap, R); +} + +static void +arm64_selvaarg(Fn *fn, Blk *b, Ins *i) +{ + Ref loc, lreg, lstk, nr, r0, r1, c8, c16, c24, c28, ap; + Blk *b0, *bstk, *breg; + int isgp; + + c8 = getcon(8, fn); + c16 = getcon(16, fn); + c24 = getcon(24, fn); + c28 = getcon(28, fn); + ap = i->arg[0]; + isgp = KBASE(i->cls) == 0; + + /* @b [...] 
+ r0 =l add ap, (24 or 28) + nr =l loadsw r0 + r1 =w csltw nr, 0 + jnz r1, @breg, @bstk + @breg + r0 =l add ap, (8 or 16) + r1 =l loadl r0 + lreg =l add r1, nr + r0 =w add nr, (8 or 16) + r1 =l add ap, (24 or 28) + storew r0, r1 + @bstk + lstk =l loadl ap + r0 =l add lstk, 8 + storel r0, ap + @b0 + %loc =l phi @breg %lreg, @bstk %lstk + i->to =(i->cls) load %loc + */ + + loc = newtmp("abi", Kl, fn); + emit(Oload, i->cls, i->to, loc, R); + b0 = split(fn, b); + b0->jmp = b->jmp; + b0->s1 = b->s1; + b0->s2 = b->s2; + if (b->s1) + chpred(b->s1, b, b0); + if (b->s2 && b->s2 != b->s1) + chpred(b->s2, b, b0); + + lreg = newtmp("abi", Kl, fn); + nr = newtmp("abi", Kl, fn); + r0 = newtmp("abi", Kw, fn); + r1 = newtmp("abi", Kl, fn); + emit(Ostorew, Kw, R, r0, r1); + emit(Oadd, Kl, r1, ap, isgp ? c24 : c28); + emit(Oadd, Kw, r0, nr, isgp ? c8 : c16); + r0 = newtmp("abi", Kl, fn); + r1 = newtmp("abi", Kl, fn); + emit(Oadd, Kl, lreg, r1, nr); + emit(Oload, Kl, r1, r0, R); + emit(Oadd, Kl, r0, ap, isgp ? c8 : c16); + breg = split(fn, b); + breg->jmp.type = Jjmp; + breg->s1 = b0; + + lstk = newtmp("abi", Kl, fn); + r0 = newtmp("abi", Kl, fn); + emit(Ostorel, Kw, R, r0, ap); + emit(Oadd, Kl, r0, lstk, c8); + emit(Oload, Kl, lstk, ap, R); + bstk = split(fn, b); + bstk->jmp.type = Jjmp; + bstk->s1 = b0; + + b0->phi = alloc(sizeof *b0->phi); + *b0->phi = (Phi){ + .cls = Kl, .to = loc, + .narg = 2, + .blk = vnew(2, sizeof b0->phi->blk[0], PFn), + .arg = vnew(2, sizeof b0->phi->arg[0], PFn), + }; + b0->phi->blk[0] = bstk; + b0->phi->blk[1] = breg; + b0->phi->arg[0] = lstk; + b0->phi->arg[1] = lreg; + r0 = newtmp("abi", Kl, fn); + r1 = newtmp("abi", Kw, fn); + b->jmp.type = Jjnz; + b->jmp.arg = r1; + b->s1 = breg; + b->s2 = bstk; + emit(Ocmpw+Cislt, Kw, r1, nr, CON_Z); + emit(Oloadsw, Kl, nr, r0, R); + emit(Oadd, Kl, r0, ap, isgp ? 
c24 : c28); +} + +static void +apple_selvastart(Fn *fn, Params p, Ref ap) +{ + Ref off, stk, arg; + + off = getcon(p.stk, fn); + stk = newtmp("abi", Kl, fn); + arg = newtmp("abi", Kl, fn); + + emit(Ostorel, 0, R, arg, ap); + emit(Oadd, Kl, arg, stk, off); + emit(Oaddr, Kl, stk, SLOT(-1), R); +} + +static void +arm64_selvastart(Fn *fn, Params p, Ref ap) +{ + Ref r0, r1, rsave; + + rsave = newtmp("abi", Kl, fn); + + r0 = newtmp("abi", Kl, fn); + emit(Ostorel, Kw, R, r0, ap); + emit(Oadd, Kl, r0, rsave, getcon(p.stk + 192, fn)); + + r0 = newtmp("abi", Kl, fn); + r1 = newtmp("abi", Kl, fn); + emit(Ostorel, Kw, R, r1, r0); + emit(Oadd, Kl, r1, rsave, getcon(64, fn)); + emit(Oadd, Kl, r0, ap, getcon(8, fn)); + + r0 = newtmp("abi", Kl, fn); + r1 = newtmp("abi", Kl, fn); + emit(Ostorel, Kw, R, r1, r0); + emit(Oadd, Kl, r1, rsave, getcon(192, fn)); + emit(Oaddr, Kl, rsave, SLOT(-1), R); + emit(Oadd, Kl, r0, ap, getcon(16, fn)); + + r0 = newtmp("abi", Kl, fn); + emit(Ostorew, Kw, R, getcon((p.ngp-8)*8, fn), r0); + emit(Oadd, Kl, r0, ap, getcon(24, fn)); + + r0 = newtmp("abi", Kl, fn); + emit(Ostorew, Kw, R, getcon((p.nfp-8)*16, fn), r0); + emit(Oadd, Kl, r0, ap, getcon(28, fn)); +} + +void +arm64_abi(Fn *fn) +{ + Blk *b; + Ins *i, *i0; + Insl *il; + int n0, n1, ioff; + Params p; + + for (b=fn->start; b; b=b->link) + b->visit = 0; + + /* lower parameters */ + for (b=fn->start, i=b->ins; i<&b->ins[b->nins]; i++) + if (!ispar(i->op)) + break; + p = selpar(fn, b->ins, i); + n0 = &insb[NIns] - curi; + ioff = i - b->ins; + n1 = b->nins - ioff; + vgrow(&b->ins, n0+n1); + icpy(b->ins+n0, b->ins+ioff, n1); + icpy(b->ins, curi, n0); + b->nins = n0+n1; + + /* lower calls, returns, and vararg instructions */ + il = 0; + b = fn->start; + do { + if (!(b = b->link)) + b = fn->start; /* do it last */ + if (b->visit) + continue; + curi = &insb[NIns]; + selret(b, fn); + for (i=&b->ins[b->nins]; i!=b->ins;) + switch ((--i)->op) { + default: + emiti(*i); + break; + case Ocall: + for (i0=i; 
i0>b->ins; i0--) + if (!isarg((i0-1)->op)) + break; + selcall(fn, i0, i, &il); + i = i0; + break; + case Ovastart: + if (T.apple) + apple_selvastart(fn, p, i->arg[0]); + else + arm64_selvastart(fn, p, i->arg[0]); + break; + case Ovaarg: + if (T.apple) + apple_selvaarg(fn, b, i); + else + arm64_selvaarg(fn, b, i); + break; + case Oarg: + case Oargc: + die("unreachable"); + } + if (b == fn->start) + for (; il; il=il->link) + emiti(il->i); + idup(b, curi, &insb[NIns]-curi); + } while (b != fn->start); + + if (debug['A']) { + fprintf(stderr, "\n> After ABI lowering:\n"); + printfn(fn, stderr); + } +} + +/* abi0 for apple target; introduces + * necessary sign extensions in calls + * and returns + */ +void +apple_extsb(Fn *fn) +{ + Blk *b; + Ins *i0, *i1, *i; + int j, op; + Ref r; + + for (b=fn->start; b; b=b->link) { + curi = &insb[NIns]; + j = b->jmp.type; + if (isretbh(j)) { + r = newtmp("abi", Kw, fn); + op = Oextsb + (j - Jretsb); + emit(op, Kw, r, b->jmp.arg, R); + b->jmp.arg = r; + b->jmp.type = Jretw; + } + for (i=&b->ins[b->nins]; i>b->ins;) { + emiti(*--i); + if (i->op != Ocall) + continue; + for (i0=i1=i; i0>b->ins; i0--) + if (!isarg((i0-1)->op)) + break; + for (i=i1; i>i0;) { + emiti(*--i); + if (isargbh(i->op)) { + i->to = newtmp("abi", Kl, fn); + curi->arg[0] = i->to; + } + } + for (i=i1; i>i0;) + if (isargbh((--i)->op)) { + op = Oextsb + (i->op - Oargsb); + emit(op, Kw, i->to, i->arg[0], R); + } + } + idup(b, curi, &insb[NIns]-curi); + } + + if (debug['A']) { + fprintf(stderr, "\n> After Apple pre-ABI:\n"); + printfn(fn, stderr); + } +} diff --git a/src/qbe/arm64/all.h b/src/qbe/arm64/all.h new file mode 100644 index 00000000..49f5d869 --- /dev/null +++ b/src/qbe/arm64/all.h @@ -0,0 +1,38 @@ +#include "../all.h" + +enum Arm64Reg { + R0 = RXX + 1, + R1, R2, R3, R4, R5, R6, R7, + R8, R9, R10, R11, R12, R13, R14, R15, + IP0, IP1, R18, R19, R20, R21, R22, R23, + R24, R25, R26, R27, R28, FP, LR, SP, + + V0, V1, V2, V3, V4, V5, V6, V7, + V8, V9, V10, V11, V12, 
V13, V14, V15, + V16, V17, V18, V19, V20, V21, V22, V23, + V24, V25, V26, V27, V28, V29, V30, /* V31, */ + + NFPR = V30 - V0 + 1, + NGPR = SP - R0 + 1, + NGPS = R18 - R0 + 1 /* LR */ + 1, + NFPS = (V7 - V0 + 1) + (V30 - V16 + 1), + NCLR = (R28 - R19 + 1) + (V15 - V8 + 1), +}; +MAKESURE(reg_not_tmp, V30 < (int)Tmp0); + +/* targ.c */ +extern int arm64_rsave[]; +extern int arm64_rclob[]; + +/* abi.c */ +bits arm64_retregs(Ref, int[2]); +bits arm64_argregs(Ref, int[2]); +void arm64_abi(Fn *); +void apple_extsb(Fn *); + +/* isel.c */ +int arm64_logimm(uint64_t, int); +void arm64_isel(Fn *); + +/* emit.c */ +void arm64_emitfn(Fn *, FILE *); diff --git a/src/qbe/arm64/emit.c b/src/qbe/arm64/emit.c new file mode 100644 index 00000000..c339e41f --- /dev/null +++ b/src/qbe/arm64/emit.c @@ -0,0 +1,679 @@ +#include "all.h" + +typedef struct E E; + +struct E { + FILE *f; + Fn *fn; + uint64_t frame; + uint padding; +}; + +#define CMP(X) \ + X(Cieq, "eq") \ + X(Cine, "ne") \ + X(Cisge, "ge") \ + X(Cisgt, "gt") \ + X(Cisle, "le") \ + X(Cislt, "lt") \ + X(Ciuge, "cs") \ + X(Ciugt, "hi") \ + X(Ciule, "ls") \ + X(Ciult, "cc") \ + X(NCmpI+Cfeq, "eq") \ + X(NCmpI+Cfge, "ge") \ + X(NCmpI+Cfgt, "gt") \ + X(NCmpI+Cfle, "ls") \ + X(NCmpI+Cflt, "mi") \ + X(NCmpI+Cfne, "ne") \ + X(NCmpI+Cfo, "vc") \ + X(NCmpI+Cfuo, "vs") + +enum { + Ki = -1, /* matches Kw and Kl */ + Ka = -2, /* matches all classes */ +}; + +static struct { + short op; + short cls; + char *fmt; +} omap[] = { + { Oadd, Ki, "add %=, %0, %1" }, + { Oadd, Ka, "fadd %=, %0, %1" }, + { Osub, Ki, "sub %=, %0, %1" }, + { Osub, Ka, "fsub %=, %0, %1" }, + { Oneg, Ki, "neg %=, %0" }, + { Oneg, Ka, "fneg %=, %0" }, + { Oand, Ki, "and %=, %0, %1" }, + { Oor, Ki, "orr %=, %0, %1" }, + { Oxor, Ki, "eor %=, %0, %1" }, + { Osar, Ki, "asr %=, %0, %1" }, + { Oshr, Ki, "lsr %=, %0, %1" }, + { Oshl, Ki, "lsl %=, %0, %1" }, + { Omul, Ki, "mul %=, %0, %1" }, + { Omul, Ka, "fmul %=, %0, %1" }, + { Odiv, Ki, "sdiv %=, %0, %1" }, + { Odiv, Ka, "fdiv 
%=, %0, %1" }, + { Oudiv, Ki, "udiv %=, %0, %1" }, + { Orem, Ki, "sdiv %?, %0, %1\n\tmsub\t%=, %?, %1, %0" }, + { Ourem, Ki, "udiv %?, %0, %1\n\tmsub\t%=, %?, %1, %0" }, + { Ocopy, Ki, "mov %=, %0" }, + { Ocopy, Ka, "fmov %=, %0" }, + { Oswap, Ki, "mov %?, %0\n\tmov\t%0, %1\n\tmov\t%1, %?" }, + { Oswap, Ka, "fmov %?, %0\n\tfmov\t%0, %1\n\tfmov\t%1, %?" }, + { Ostoreb, Kw, "strb %W0, %M1" }, + { Ostoreh, Kw, "strh %W0, %M1" }, + { Ostorew, Kw, "str %W0, %M1" }, + { Ostorel, Kw, "str %L0, %M1" }, + { Ostores, Kw, "str %S0, %M1" }, + { Ostored, Kw, "str %D0, %M1" }, + { Oloadsb, Ki, "ldrsb %=, %M0" }, + { Oloadub, Ki, "ldrb %W=, %M0" }, + { Oloadsh, Ki, "ldrsh %=, %M0" }, + { Oloaduh, Ki, "ldrh %W=, %M0" }, + { Oloadsw, Kw, "ldr %=, %M0" }, + { Oloadsw, Kl, "ldrsw %=, %M0" }, + { Oloaduw, Ki, "ldr %W=, %M0" }, + { Oload, Ka, "ldr %=, %M0" }, + { Oextsb, Ki, "sxtb %=, %W0" }, + { Oextub, Ki, "uxtb %W=, %W0" }, + { Oextsh, Ki, "sxth %=, %W0" }, + { Oextuh, Ki, "uxth %W=, %W0" }, + { Oextsw, Ki, "sxtw %L=, %W0" }, + { Oextuw, Ki, "mov %W=, %W0" }, + { Oexts, Kd, "fcvt %=, %S0" }, + { Otruncd, Ks, "fcvt %=, %D0" }, + { Ocast, Kw, "fmov %=, %S0" }, + { Ocast, Kl, "fmov %=, %D0" }, + { Ocast, Ks, "fmov %=, %W0" }, + { Ocast, Kd, "fmov %=, %L0" }, + { Ostosi, Ka, "fcvtzs %=, %S0" }, + { Ostoui, Ka, "fcvtzu %=, %S0" }, + { Odtosi, Ka, "fcvtzs %=, %D0" }, + { Odtoui, Ka, "fcvtzu %=, %D0" }, + { Oswtof, Ka, "scvtf %=, %W0" }, + { Ouwtof, Ka, "ucvtf %=, %W0" }, + { Osltof, Ka, "scvtf %=, %L0" }, + { Oultof, Ka, "ucvtf %=, %L0" }, + { Ocall, Kw, "blr %L0" }, + + { Oacmp, Ki, "cmp %0, %1" }, + { Oacmn, Ki, "cmn %0, %1" }, + { Oafcmp, Ka, "fcmpe %0, %1" }, + +#define X(c, str) \ + { Oflag+c, Ki, "cset %=, " str }, + CMP(X) +#undef X + { NOp, 0, 0 } +}; + +enum { + V31 = 0x1fffffff, /* local name for V31 */ +}; + +static char * +rname(int r, int k) +{ + static char buf[4]; + + if (r == SP) { + assert(k == Kl); + sprintf(buf, "sp"); + } + else if (R0 <= r && r <= LR) + switch (k) { + 
default: die("invalid class"); + case Kw: sprintf(buf, "w%d", r-R0); break; + case Kx: + case Kl: sprintf(buf, "x%d", r-R0); break; + } + else if (V0 <= r && r <= V30) + switch (k) { + default: die("invalid class"); + case Ks: sprintf(buf, "s%d", r-V0); break; + case Kx: + case Kd: sprintf(buf, "d%d", r-V0); break; + } + else if (r == V31) + switch (k) { + default: die("invalid class"); + case Ks: sprintf(buf, "s31"); break; + case Kd: sprintf(buf, "d31"); break; + } + else + die("invalid register"); + return buf; +} + +static uint64_t +slot(Ref r, E *e) +{ + int s; + + s = rsval(r); + if (s == -1) + return 16 + e->frame; + if (s < 0) { + if (e->fn->vararg && !T.apple) + return 16 + e->frame + 192 - (s+2); + else + return 16 + e->frame - (s+2); + } else + return 16 + e->padding + 4 * s; +} + +static void +emitf(char *s, Ins *i, E *e) +{ + Ref r; + int k, c; + Con *pc; + uint64_t n; + uint sp; + + fputc('\t', e->f); + + sp = 0; + for (;;) { + k = i->cls; + while ((c = *s++) != '%') + if (c == ' ' && !sp) { + fputc('\t', e->f); + sp = 1; + } else if (!c) { + fputc('\n', e->f); + return; + } else + fputc(c, e->f); + Switch: + switch ((c = *s++)) { + default: + die("invalid escape"); + case 'W': + k = Kw; + goto Switch; + case 'L': + k = Kl; + goto Switch; + case 'S': + k = Ks; + goto Switch; + case 'D': + k = Kd; + goto Switch; + case '?': + if (KBASE(k) == 0) + fputs(rname(IP1, k), e->f); + else + fputs(rname(V31, k), e->f); + break; + case '=': + case '0': + r = c == '=' ? 
i->to : i->arg[0]; + assert(isreg(r) || req(r, TMP(V31))); + fputs(rname(r.val, k), e->f); + break; + case '1': + r = i->arg[1]; + switch (rtype(r)) { + default: + die("invalid second argument"); + case RTmp: + assert(isreg(r)); + fputs(rname(r.val, k), e->f); + break; + case RCon: + pc = &e->fn->con[r.val]; + n = pc->bits.i; + assert(pc->type == CBits); + if (n >> 24) { + assert(arm64_logimm(n, k)); + fprintf(e->f, "#%"PRIu64, n); + } else if (n & 0xfff000) { + assert(!(n & ~0xfff000ull)); + fprintf(e->f, "#%"PRIu64", lsl #12", + n>>12); + } else { + assert(!(n & ~0xfffull)); + fprintf(e->f, "#%"PRIu64, n); + } + break; + } + break; + case 'M': + c = *s++; + assert(c == '0' || c == '1' || c == '='); + r = c == '=' ? i->to : i->arg[c - '0']; + switch (rtype(r)) { + default: + die("todo (arm emit): unhandled ref"); + case RTmp: + assert(isreg(r)); + fprintf(e->f, "[%s]", rname(r.val, Kl)); + break; + case RSlot: + fprintf(e->f, "[x29, %"PRIu64"]", slot(r, e)); + break; + } + break; + } + } +} + +static void +loadaddr(Con *c, char *rn, E *e) +{ + char *p, *l, *s; + + switch (c->sym.type) { + default: + die("unreachable"); + case SGlo: + if (T.apple) + s = "\tadrp\tR, S@pageO\n" + "\tadd\tR, R, S@pageoffO\n"; + else + s = "\tadrp\tR, SO\n" + "\tadd\tR, R, #:lo12:SO\n"; + break; + case SThr: + if (T.apple) + s = "\tadrp\tR, S@tlvppage\n" + "\tldr\tR, [R, S@tlvppageoff]\n"; + else + s = "\tmrs\tR, tpidr_el0\n" + "\tadd\tR, R, #:tprel_hi12:SO, lsl #12\n" + "\tadd\tR, R, #:tprel_lo12_nc:SO\n"; + break; + } + + l = str(c->sym.id); + p = l[0] == '"' ? 
"" : T.assym; + for (; *s; s++) + switch (*s) { + default: + fputc(*s, e->f); + break; + case 'R': + fputs(rn, e->f); + break; + case 'S': + fputs(p, e->f); + fputs(l, e->f); + break; + case 'O': + if (c->bits.i) + /* todo, handle large offsets */ + fprintf(e->f, "+%"PRIi64, c->bits.i); + break; + } +} + +static void +loadcon(Con *c, int r, int k, E *e) +{ + char *rn; + int64_t n; + int w, sh; + + w = KWIDE(k); + rn = rname(r, k); + n = c->bits.i; + if (c->type == CAddr) { + rn = rname(r, Kl); + loadaddr(c, rn, e); + return; + } + assert(c->type == CBits); + if (!w) + n = (int32_t)n; + if ((n | 0xffff) == -1 || arm64_logimm(n, k)) { + fprintf(e->f, "\tmov\t%s, #%"PRIi64"\n", rn, n); + } else { + fprintf(e->f, "\tmov\t%s, #%d\n", + rn, (int)(n & 0xffff)); + for (sh=16; n>>=16; sh+=16) { + if ((!w && sh == 32) || sh == 64) + break; + fprintf(e->f, "\tmovk\t%s, #0x%x, lsl #%d\n", + rn, (uint)(n & 0xffff), sh); + } + } +} + +static void emitins(Ins *, E *); + +static int +fixarg(Ref *pr, int sz, int t, E *e) +{ + Ins *i; + Ref r; + uint64_t s; + + r = *pr; + if (rtype(r) == RSlot) { + s = slot(r, e); + if (s > sz * 4095u) { + if (t < 0) + return 1; + i = &(Ins){Oaddr, Kl, TMP(t), {r}}; + emitins(i, e); + *pr = TMP(t); + } + } + return 0; +} + +static void +emitins(Ins *i, E *e) +{ + char *l, *p, *rn; + uint64_t s; + int o, t; + Ref r; + Con *c; + + switch (i->op) { + default: + if (isload(i->op)) + fixarg(&i->arg[0], loadsz(i), IP1, e); + if (isstore(i->op)) { + t = T.apple ? 
-1 : R18; + if (fixarg(&i->arg[1], storesz(i), t, e)) { + if (req(i->arg[0], TMP(IP1))) { + fprintf(e->f, + "\tfmov\t%c31, %c17\n", + "ds"[i->cls == Kw], + "xw"[i->cls == Kw]); + i->arg[0] = TMP(V31); + i->op = Ostores + (i->cls-Kw); + } + fixarg(&i->arg[1], storesz(i), IP1, e); + } + } + Table: + /* most instructions are just pulled out of + * the table omap[], some special cases are + * detailed below */ + for (o=0;; o++) { + /* this linear search should really be a binary + * search */ + if (omap[o].op == NOp) + die("no match for %s(%c)", + optab[i->op].name, "wlsd"[i->cls]); + if (omap[o].op == i->op) + if (omap[o].cls == i->cls || omap[o].cls == Ka + || (omap[o].cls == Ki && KBASE(i->cls) == 0)) + break; + } + emitf(omap[o].fmt, i, e); + break; + case Onop: + break; + case Ocopy: + if (req(i->to, i->arg[0])) + break; + if (rtype(i->to) == RSlot) { + r = i->to; + if (!isreg(i->arg[0])) { + i->to = TMP(IP1); + emitins(i, e); + i->arg[0] = i->to; + } + i->op = Ostorew + i->cls; + i->cls = Kw; + i->arg[1] = r; + emitins(i, e); + break; + } + assert(isreg(i->to)); + switch (rtype(i->arg[0])) { + case RCon: + c = &e->fn->con[i->arg[0].val]; + loadcon(c, i->to.val, i->cls, e); + break; + case RSlot: + i->op = Oload; + emitins(i, e); + break; + default: + assert(i->to.val != IP1); + goto Table; + } + break; + case Oaddr: + assert(rtype(i->arg[0]) == RSlot); + rn = rname(i->to.val, Kl); + s = slot(i->arg[0], e); + if (s <= 4095) + fprintf(e->f, "\tadd\t%s, x29, #%"PRIu64"\n", rn, s); + else if (s <= 65535) + fprintf(e->f, + "\tmov\t%s, #%"PRIu64"\n" + "\tadd\t%s, x29, %s\n", + rn, s, rn, rn + ); + else + fprintf(e->f, + "\tmov\t%s, #%"PRIu64"\n" + "\tmovk\t%s, #%"PRIu64", lsl #16\n" + "\tadd\t%s, x29, %s\n", + rn, s & 0xFFFF, rn, s >> 16, rn, rn + ); + break; + case Ocall: + if (rtype(i->arg[0]) != RCon) + goto Table; + c = &e->fn->con[i->arg[0].val]; + if (c->type != CAddr + || c->sym.type != SGlo + || c->bits.i) + die("invalid call argument"); + l = str(c->sym.id); + 
p = l[0] == '"' ? "" : T.assym; + fprintf(e->f, "\tbl\t%s%s\n", p, l); + break; + case Osalloc: + emitf("sub sp, sp, %0", i, e); + if (!req(i->to, R)) + emitf("mov %=, sp", i, e); + break; + case Odbgloc: + emitdbgloc(i->arg[0].val, i->arg[1].val, e->f); + break; + } +} + +static void +framelayout(E *e) +{ + int *r; + uint o; + uint64_t f; + + for (o=0, r=arm64_rclob; *r>=0; r++) + o += 1 & (e->fn->reg >> *r); + f = e->fn->slot; + f = (f + 3) & -4; + o += o & 1; + e->padding = 4*(f-e->fn->slot); + e->frame = 4*f + 8*o; +} + +/* + + Stack-frame layout: + + +=============+ + | varargs | + | save area | + +-------------+ + | callee-save | ^ + | registers | | + +-------------+ | + | ... | | + | spill slots | | + | ... | | e->frame + +-------------+ | + | ... | | + | locals | | + | ... | | + +-------------+ | + | e->padding | v + +-------------+ + | saved x29 | + | saved x30 | + +=============+ <- x29 + +*/ + +void +arm64_emitfn(Fn *fn, FILE *out) +{ + static char *ctoa[] = { + #define X(c, s) [c] = s, + CMP(X) + #undef X + }; + static int id0; + int s, n, c, lbl, *r; + uint64_t o; + Blk *b, *t; + Ins *i; + E *e; + + e = &(E){.f = out, .fn = fn}; + if (T.apple) + e->fn->lnk.align = 4; + emitfnlnk(e->fn->name, &e->fn->lnk, e->f); + fputs("\thint\t#34\n", e->f); + framelayout(e); + + if (e->fn->vararg && !T.apple) { + for (n=7; n>=0; n--) + fprintf(e->f, "\tstr\tq%d, [sp, -16]!\n", n); + for (n=7; n>=0; n-=2) + fprintf(e->f, "\tstp\tx%d, x%d, [sp, -16]!\n", n-1, n); + } + + if (e->frame + 16 <= 512) + fprintf(e->f, + "\tstp\tx29, x30, [sp, -%"PRIu64"]!\n", + e->frame + 16 + ); + else if (e->frame <= 4095) + fprintf(e->f, + "\tsub\tsp, sp, #%"PRIu64"\n" + "\tstp\tx29, x30, [sp, -16]!\n", + e->frame + ); + else if (e->frame <= 65535) + fprintf(e->f, + "\tmov\tx16, #%"PRIu64"\n" + "\tsub\tsp, sp, x16\n" + "\tstp\tx29, x30, [sp, -16]!\n", + e->frame + ); + else + fprintf(e->f, + "\tmov\tx16, #%"PRIu64"\n" + "\tmovk\tx16, #%"PRIu64", lsl #16\n" + "\tsub\tsp, sp, x16\n" + 
"\tstp\tx29, x30, [sp, -16]!\n", + e->frame & 0xFFFF, e->frame >> 16 + ); + fputs("\tmov\tx29, sp\n", e->f); + s = (e->frame - e->padding) / 4; + for (r=arm64_rclob; *r>=0; r++) + if (e->fn->reg & BIT(*r)) { + s -= 2; + i = &(Ins){.arg = {TMP(*r), SLOT(s)}}; + i->op = *r >= V0 ? Ostored : Ostorel; + emitins(i, e); + } + + for (lbl=0, b=e->fn->start; b; b=b->link) { + if (lbl || b->npred > 1) + fprintf(e->f, "%s%d:\n", T.asloc, id0+b->id); + for (i=b->ins; i!=&b->ins[b->nins]; i++) + emitins(i, e); + lbl = 1; + switch (b->jmp.type) { + case Jhlt: + fprintf(e->f, "\tbrk\t#1000\n"); + break; + case Jret0: + s = (e->frame - e->padding) / 4; + for (r=arm64_rclob; *r>=0; r++) + if (e->fn->reg & BIT(*r)) { + s -= 2; + i = &(Ins){Oload, 0, TMP(*r), {SLOT(s)}}; + i->cls = *r >= V0 ? Kd : Kl; + emitins(i, e); + } + if (e->fn->dynalloc) + fputs("\tmov sp, x29\n", e->f); + o = e->frame + 16; + if (e->fn->vararg && !T.apple) + o += 192; + if (o <= 504) + fprintf(e->f, + "\tldp\tx29, x30, [sp], %"PRIu64"\n", + o + ); + else if (o - 16 <= 4095) + fprintf(e->f, + "\tldp\tx29, x30, [sp], 16\n" + "\tadd\tsp, sp, #%"PRIu64"\n", + o - 16 + ); + else if (o - 16 <= 65535) + fprintf(e->f, + "\tldp\tx29, x30, [sp], 16\n" + "\tmov\tx16, #%"PRIu64"\n" + "\tadd\tsp, sp, x16\n", + o - 16 + ); + else + fprintf(e->f, + "\tldp\tx29, x30, [sp], 16\n" + "\tmov\tx16, #%"PRIu64"\n" + "\tmovk\tx16, #%"PRIu64", lsl #16\n" + "\tadd\tsp, sp, x16\n", + (o - 16) & 0xFFFF, (o - 16) >> 16 + ); + fprintf(e->f, "\tret\n"); + break; + case Jjmp: + Jmp: + if (b->s1 != b->link) + fprintf(e->f, + "\tb\t%s%d\n", + T.asloc, id0+b->s1->id + ); + else + lbl = 0; + break; + default: + c = b->jmp.type - Jjf; + if (c < 0 || c > NCmp) + die("unhandled jump %d", b->jmp.type); + if (b->link == b->s2) { + t = b->s1; + b->s1 = b->s2; + b->s2 = t; + } else + c = cmpneg(c); + fprintf(e->f, + "\tb%s\t%s%d\n", + ctoa[c], T.asloc, id0+b->s2->id + ); + goto Jmp; + } + } + id0 += e->fn->nblk; + if (!T.apple) + 
elf_emitfnfin(fn->name, out); +} diff --git a/src/qbe/arm64/isel.c b/src/qbe/arm64/isel.c new file mode 100644 index 00000000..ff3b30b0 --- /dev/null +++ b/src/qbe/arm64/isel.c @@ -0,0 +1,316 @@ +#include "all.h" + +enum Imm { + Iother, + Iplo12, + Iphi12, + Iplo24, + Inlo12, + Inhi12, + Inlo24 +}; + +static enum Imm +imm(Con *c, int k, int64_t *pn) +{ + int64_t n; + int i; + + if (c->type != CBits) + return Iother; + n = c->bits.i; + if (k == Kw) + n = (int32_t)n; + i = Iplo12; + if (n < 0) { + i = Inlo12; + n = -(uint64_t)n; + } + *pn = n; + if ((n & 0x000fff) == n) + return i; + if ((n & 0xfff000) == n) + return i + 1; + if ((n & 0xffffff) == n) + return i + 2; + return Iother; +} + +int +arm64_logimm(uint64_t x, int k) +{ + uint64_t n; + + if (k == Kw) + x = (x & 0xffffffff) | x << 32; + if (x & 1) + x = ~x; + if (x == 0) + return 0; + if (x == 0xaaaaaaaaaaaaaaaa) + return 1; + n = x & 0xf; + if (0x1111111111111111 * n == x) + goto Check; + n = x & 0xff; + if (0x0101010101010101 * n == x) + goto Check; + n = x & 0xffff; + if (0x0001000100010001 * n == x) + goto Check; + n = x & 0xffffffff; + if (0x0000000100000001 * n == x) + goto Check; + n = x; +Check: + return (n & (n + (n & -n))) == 0; +} + +static void +fixarg(Ref *pr, int k, int phi, Fn *fn) +{ + char buf[32]; + Con *c, cc; + Ref r0, r1, r2, r3; + int s, n; + + r0 = *pr; + switch (rtype(r0)) { + case RCon: + c = &fn->con[r0.val]; + if (T.apple + && c->type == CAddr + && c->sym.type == SThr) { + r1 = newtmp("isel", Kl, fn); + *pr = r1; + if (c->bits.i) { + r2 = newtmp("isel", Kl, fn); + cc = (Con){.type = CBits}; + cc.bits.i = c->bits.i; + r3 = newcon(&cc, fn); + emit(Oadd, Kl, r1, r2, r3); + r1 = r2; + } + emit(Ocopy, Kl, r1, TMP(R0), R); + r1 = newtmp("isel", Kl, fn); + r2 = newtmp("isel", Kl, fn); + emit(Ocall, 0, R, r1, CALL(33)); + emit(Ocopy, Kl, TMP(R0), r2, R); + emit(Oload, Kl, r1, r2, R); + cc = *c; + cc.bits.i = 0; + r3 = newcon(&cc, fn); + emit(Ocopy, Kl, r2, r3, R); + break; + } + if (KBASE(k) 
== 0 && phi) + return; + r1 = newtmp("isel", k, fn); + if (KBASE(k) == 0) { + emit(Ocopy, k, r1, r0, R); + } else { + n = stashbits(c->bits.i, KWIDE(k) ? 8 : 4); + vgrow(&fn->con, ++fn->ncon); + c = &fn->con[fn->ncon-1]; + sprintf(buf, "\"%sfp%d\"", T.asloc, n); + *c = (Con){.type = CAddr}; + c->sym.id = intern(buf); + r2 = newtmp("isel", Kl, fn); + emit(Oload, k, r1, r2, R); + emit(Ocopy, Kl, r2, CON(c-fn->con), R); + } + *pr = r1; + break; + case RTmp: + s = fn->tmp[r0.val].slot; + if (s == -1) + break; + r1 = newtmp("isel", Kl, fn); + emit(Oaddr, Kl, r1, SLOT(s), R); + *pr = r1; + break; + } +} + +static int +selcmp(Ref arg[2], int k, Fn *fn) +{ + Ref r, *iarg; + Con *c; + int swap, cmp, fix; + int64_t n; + + if (KBASE(k) == 1) { + emit(Oafcmp, k, R, arg[0], arg[1]); + iarg = curi->arg; + fixarg(&iarg[0], k, 0, fn); + fixarg(&iarg[1], k, 0, fn); + return 0; + } + swap = rtype(arg[0]) == RCon; + if (swap) { + r = arg[1]; + arg[1] = arg[0]; + arg[0] = r; + } + fix = 1; + cmp = Oacmp; + r = arg[1]; + if (rtype(r) == RCon) { + c = &fn->con[r.val]; + switch (imm(c, k, &n)) { + default: + break; + case Iplo12: + case Iphi12: + fix = 0; + break; + case Inlo12: + case Inhi12: + cmp = Oacmn; + r = getcon(n, fn); + fix = 0; + break; + } + } + emit(cmp, k, R, arg[0], r); + iarg = curi->arg; + fixarg(&iarg[0], k, 0, fn); + if (fix) + fixarg(&iarg[1], k, 0, fn); + return swap; +} + +static int +callable(Ref r, Fn *fn) +{ + Con *c; + + if (rtype(r) == RTmp) + return 1; + if (rtype(r) == RCon) { + c = &fn->con[r.val]; + if (c->type == CAddr) + if (c->bits.i == 0) + return 1; + } + return 0; +} + +static void +sel(Ins i, Fn *fn) +{ + Ref *iarg; + Ins *i0; + int ck, cc; + + if (INRANGE(i.op, Oalloc, Oalloc1)) { + i0 = curi - 1; + salloc(i.to, i.arg[0], fn); + fixarg(&i0->arg[0], Kl, 0, fn); + return; + } + if (iscmp(i.op, &ck, &cc)) { + emit(Oflag, i.cls, i.to, R, R); + i0 = curi; + if (selcmp(i.arg, ck, fn)) + i0->op += cmpop(cc); + else + i0->op += cc; + return; + } + if (i.op 
== Ocall) + if (callable(i.arg[0], fn)) { + emiti(i); + return; + } + if (i.op != Onop) { + emiti(i); + iarg = curi->arg; /* fixarg() can change curi */ + fixarg(&iarg[0], argcls(&i, 0), 0, fn); + fixarg(&iarg[1], argcls(&i, 1), 0, fn); + } +} + +static void +seljmp(Blk *b, Fn *fn) +{ + Ref r; + Ins *i, *ir; + int ck, cc, use; + + if (b->jmp.type == Jret0 + || b->jmp.type == Jjmp + || b->jmp.type == Jhlt) + return; + assert(b->jmp.type == Jjnz); + r = b->jmp.arg; + use = -1; + b->jmp.arg = R; + ir = 0; + i = &b->ins[b->nins]; + while (i > b->ins) + if (req((--i)->to, r)) { + use = fn->tmp[r.val].nuse; + ir = i; + break; + } + if (ir && use == 1 + && iscmp(ir->op, &ck, &cc)) { + if (selcmp(ir->arg, ck, fn)) + cc = cmpop(cc); + b->jmp.type = Jjf + cc; + *ir = (Ins){.op = Onop}; + } + else { + selcmp((Ref[]){r, CON_Z}, Kw, fn); + b->jmp.type = Jjfine; + } +} + +void +arm64_isel(Fn *fn) +{ + Blk *b, **sb; + Ins *i; + Phi *p; + uint n, al; + int64_t sz; + + /* assign slots to fast allocs */ + b = fn->start; + /* specific to NAlign == 3 */ /* or change n=4 and sz /= 4 below */ + for (al=Oalloc, n=4; al<=Oalloc1; al++, n*=2) + for (i=b->ins; i<&b->ins[b->nins]; i++) + if (i->op == al) { + if (rtype(i->arg[0]) != RCon) + break; + sz = fn->con[i->arg[0].val].bits.i; + if (sz < 0 || sz >= INT_MAX-15) + err("invalid alloc size %"PRId64, sz); + sz = (sz + n-1) & -n; + sz /= 4; + fn->tmp[i->to.val].slot = fn->slot; + fn->slot += sz; + *i = (Ins){.op = Onop}; + } + + for (b=fn->start; b; b=b->link) { + curi = &insb[NIns]; + for (sb=(Blk*[3]){b->s1, b->s2, 0}; *sb; sb++) + for (p=(*sb)->phi; p; p=p->link) { + for (n=0; p->blk[n] != b; n++) + assert(n+1 < p->narg); + fixarg(&p->arg[n], p->cls, 1, fn); + } + seljmp(b, fn); + for (i=&b->ins[b->nins]; i!=b->ins;) + sel(*--i, fn); + idup(b, curi, &insb[NIns]-curi); + } + + if (debug['I']) { + fprintf(stderr, "\n> After instruction selection:\n"); + printfn(fn, stderr); + } +} diff --git a/src/qbe/arm64/targ.c b/src/qbe/arm64/targ.c 
new file mode 100644 index 00000000..8f1e1493 --- /dev/null +++ b/src/qbe/arm64/targ.c @@ -0,0 +1,69 @@ +#include "all.h" + +int arm64_rsave[] = { + R0, R1, R2, R3, R4, R5, R6, R7, + R8, R9, R10, R11, R12, R13, R14, R15, + IP0, IP1, R18, LR, + V0, V1, V2, V3, V4, V5, V6, V7, + V16, V17, V18, V19, V20, V21, V22, V23, + V24, V25, V26, V27, V28, V29, V30, + -1 +}; +int arm64_rclob[] = { + R19, R20, R21, R22, R23, R24, R25, R26, + R27, R28, + V8, V9, V10, V11, V12, V13, V14, V15, + -1 +}; + +#define RGLOB (BIT(FP) | BIT(SP) | BIT(IP1) | BIT(R18)) + +static int +arm64_memargs(int op) +{ + (void)op; + return 0; +} + +#define ARM64_COMMON \ + .gpr0 = R0, \ + .ngpr = NGPR, \ + .fpr0 = V0, \ + .nfpr = NFPR, \ + .rglob = RGLOB, \ + .nrglob = 4, \ + .rsave = arm64_rsave, \ + .nrsave = {NGPS, NFPS}, \ + .retregs = arm64_retregs, \ + .argregs = arm64_argregs, \ + .memargs = arm64_memargs, \ + .isel = arm64_isel, \ + .abi1 = arm64_abi, \ + .emitfn = arm64_emitfn, \ + .cansel = 0, \ + +Target T_arm64 = { + .name = "arm64", + .abi0 = elimsb, + .emitfin = elf_emitfin, + .asloc = ".L", + ARM64_COMMON +}; + +Target T_arm64_apple = { + .name = "arm64_apple", + .apple = 1, + .abi0 = apple_extsb, + .emitfin = macho_emitfin, + .asloc = "L", + .assym = "_", + ARM64_COMMON +}; + +MAKESURE(globals_are_not_arguments, + (RGLOB & (BIT(R8+1) - 1)) == 0 +); +MAKESURE(arrays_size_ok, + sizeof arm64_rsave == (NGPS+NFPS+1) * sizeof(int) && + sizeof arm64_rclob == (NCLR+1) * sizeof(int) +); diff --git a/src/qbe/cfg.c b/src/qbe/cfg.c new file mode 100644 index 00000000..6d160f01 --- /dev/null +++ b/src/qbe/cfg.c @@ -0,0 +1,567 @@ +#include "all.h" + +Blk * +newblk() +{ + static Blk z; + Blk *b; + + b = alloc(sizeof *b); + *b = z; + b->ins = vnew(0, sizeof b->ins[0], PFn); + b->pred = vnew(0, sizeof b->pred[0], PFn); + return b; +} + +static void +fixphis(Fn *f) +{ + Blk *b, *bp; + Phi *p; + uint n, n0; + + for (b=f->start; b; b=b->link) { + assert(b->id < f->nblk); + for (p=b->phi; p; p=p->link) { + 
for (n=n0=0; nnarg; n++) { + bp = p->blk[n]; + if (bp->id != -1u) + if (bp->s1 == b || bp->s2 == b) { + p->blk[n0] = bp; + p->arg[n0] = p->arg[n]; + n0++; + } + } + assert(n0 > 0); + p->narg = n0; + } + } +} + +static void +addpred(Blk *bp, Blk *b) +{ + vgrow(&b->pred, ++b->npred); + b->pred[b->npred-1] = bp; +} + +void +fillpreds(Fn *f) +{ + Blk *b; + + for (b=f->start; b; b=b->link) + b->npred = 0; + for (b=f->start; b; b=b->link) { + if (b->s1) + addpred(b, b->s1); + if (b->s2 && b->s2 != b->s1) + addpred(b, b->s2); + } +} + +static void +porec(Blk *b, uint *npo) +{ + Blk *s1, *s2; + + if (!b || b->id != -1u) + return; + b->id = 0; /* marker */ + s1 = b->s1; + s2 = b->s2; + if (s1 && s2 && s1->loop > s2->loop) { + s1 = b->s2; + s2 = b->s1; + } + porec(s1, npo); + porec(s2, npo); + b->id = (*npo)++; +} + +static void +fillrpo(Fn *f) +{ + Blk *b, **p; + + for (b=f->start; b; b=b->link) + b->id = -1u; + f->nblk = 0; + porec(f->start, &f->nblk); + vgrow(&f->rpo, f->nblk); + for (p=&f->start; (b=*p);) { + if (b->id == -1u) { + *p = b->link; + } else { + b->id = f->nblk-b->id-1; + f->rpo[b->id] = b; + p = &b->link; + } + } +} + +/* fill rpo, preds; prune dead blks */ +void +fillcfg(Fn *f) +{ + fillrpo(f); + fillpreds(f); + fixphis(f); +} + +/* for dominators computation, read + * "A Simple, Fast Dominance Algorithm" + * by K. Cooper, T. Harvey, and K. Kennedy. 
+ */ + +static Blk * +inter(Blk *b1, Blk *b2) +{ + Blk *bt; + + if (b1 == 0) + return b2; + while (b1 != b2) { + if (b1->id < b2->id) { + bt = b1; + b1 = b2; + b2 = bt; + } + while (b1->id > b2->id) { + b1 = b1->idom; + assert(b1); + } + } + return b1; +} + +void +filldom(Fn *fn) +{ + Blk *b, *d; + int ch; + uint n, p; + + for (b=fn->start; b; b=b->link) { + b->idom = 0; + b->dom = 0; + b->dlink = 0; + } + do { + ch = 0; + for (n=1; nnblk; n++) { + b = fn->rpo[n]; + d = 0; + for (p=0; pnpred; p++) + if (b->pred[p]->idom + || b->pred[p] == fn->start) + d = inter(d, b->pred[p]); + if (d != b->idom) { + ch++; + b->idom = d; + } + } + } while (ch); + for (b=fn->start; b; b=b->link) + if ((d=b->idom)) { + assert(d != b); + b->dlink = d->dom; + d->dom = b; + } +} + +int +sdom(Blk *b1, Blk *b2) +{ + assert(b1 && b2); + if (b1 == b2) + return 0; + while (b2->id > b1->id) + b2 = b2->idom; + return b1 == b2; +} + +int +dom(Blk *b1, Blk *b2) +{ + return b1 == b2 || sdom(b1, b2); +} + +static void +addfron(Blk *a, Blk *b) +{ + uint n; + + for (n=0; nnfron; n++) + if (a->fron[n] == b) + return; + if (!a->nfron) + a->fron = vnew(++a->nfron, sizeof a->fron[0], PFn); + else + vgrow(&a->fron, ++a->nfron); + a->fron[a->nfron-1] = b; +} + +/* fill the dominance frontier */ +void +fillfron(Fn *fn) +{ + Blk *a, *b; + + for (b=fn->start; b; b=b->link) + b->nfron = 0; + for (b=fn->start; b; b=b->link) { + if (b->s1) + for (a=b; !sdom(a, b->s1); a=a->idom) + addfron(a, b->s1); + if (b->s2) + for (a=b; !sdom(a, b->s2); a=a->idom) + addfron(a, b->s2); + } +} + +static void +loopmark(Blk *hd, Blk *b, void f(Blk *, Blk *)) +{ + uint p; + + if (b->id < hd->id || b->visit == hd->id) + return; + b->visit = hd->id; + f(hd, b); + for (p=0; pnpred; ++p) + loopmark(hd, b->pred[p], f); +} + +void +loopiter(Fn *fn, void f(Blk *, Blk *)) +{ + uint n, p; + Blk *b; + + for (b=fn->start; b; b=b->link) + b->visit = -1u; + for (n=0; nnblk; ++n) { + b = fn->rpo[n]; + for (p=0; pnpred; ++p) + if 
(b->pred[p]->id >= n) + loopmark(b, b->pred[p], f); + } +} + +/* dominator tree depth */ +void +filldepth(Fn *fn) +{ + Blk *b, *d; + int depth; + + for (b=fn->start; b; b=b->link) + b->depth = -1; + + fn->start->depth = 0; + + for (b=fn->start; b; b=b->link) { + if (b->depth != -1) + continue; + depth = 1; + for (d=b->idom; d->depth==-1; d=d->idom) + depth++; + depth += d->depth; + b->depth = depth; + for (d=b->idom; d->depth==-1; d=d->idom) + d->depth = --depth; + } +} + +/* least common ancestor in dom tree */ +Blk * +lca(Blk *b1, Blk *b2) +{ + if (!b1) + return b2; + if (!b2) + return b1; + while (b1->depth > b2->depth) + b1 = b1->idom; + while (b2->depth > b1->depth) + b2 = b2->idom; + while (b1 != b2) { + b1 = b1->idom; + b2 = b2->idom; + } + return b1; +} + +void +multloop(Blk *hd, Blk *b) +{ + (void)hd; + b->loop *= 10; +} + +void +fillloop(Fn *fn) +{ + Blk *b; + + for (b=fn->start; b; b=b->link) + b->loop = 1; + loopiter(fn, multloop); +} + +static void +uffind(Blk **pb, Blk **uf) +{ + Blk **pb1; + + pb1 = &uf[(*pb)->id]; + if (*pb1) { + uffind(pb1, uf); + *pb = *pb1; + } +} + +/* requires rpo and no phis, breaks cfg */ +void +simpljmp(Fn *fn) +{ + + Blk **uf; /* union-find */ + Blk **p, *b, *ret; + + ret = newblk(); + ret->id = fn->nblk++; + ret->jmp.type = Jret0; + uf = emalloc(fn->nblk * sizeof uf[0]); + for (b=fn->start; b; b=b->link) { + assert(!b->phi); + if (b->jmp.type == Jret0) { + b->jmp.type = Jjmp; + b->s1 = ret; + } + if (b->nins == 0) + if (b->jmp.type == Jjmp) { + uffind(&b->s1, uf); + if (b->s1 != b) + uf[b->id] = b->s1; + } + } + for (p=&fn->start; (b=*p); p=&b->link) { + if (b->s1) + uffind(&b->s1, uf); + if (b->s2) + uffind(&b->s2, uf); + if (b->s1 && b->s1 == b->s2) { + b->jmp.type = Jjmp; + b->s2 = 0; + } + } + *p = ret; + free(uf); +} + +static int +reachrec(Blk *b, Blk *to) +{ + if (b == to) + return 1; + if (!b || b->visit) + return 0; + + b->visit = 1; + if (reachrec(b->s1, to)) + return 1; + if (reachrec(b->s2, to)) + return 1; + + 
return 0; +} + +/* Blk.visit needs to be clear at entry */ +int +reaches(Fn *fn, Blk *b, Blk *to) +{ + int r; + + assert(to); + r = reachrec(b, to); + for (b=fn->start; b; b=b->link) + b->visit = 0; + return r; +} + +/* can b reach 'to' not through excl + * Blk.visit needs to be clear at entry */ +int +reachesnotvia(Fn *fn, Blk *b, Blk *to, Blk *excl) +{ + excl->visit = 1; + return reaches(fn, b, to); +} + +int +ifgraph(Blk *ifb, Blk **pthenb, Blk **pelseb, Blk **pjoinb) +{ + Blk *s1, *s2, **t; + + if (ifb->jmp.type != Jjnz) + return 0; + + s1 = ifb->s1; + s2 = ifb->s2; + if (s1->id > s2->id) { + s1 = ifb->s2; + s2 = ifb->s1; + t = pthenb; + pthenb = pelseb; + pelseb = t; + } + if (s1 == s2) + return 0; + + if (s1->jmp.type != Jjmp || s1->npred != 1) + return 0; + + if (s1->s1 == s2) { + /* if-then / if-else */ + if (s2->npred != 2) + return 0; + *pthenb = s1; + *pelseb = ifb; + *pjoinb = s2; + return 1; + } + + if (s2->jmp.type != Jjmp || s2->npred != 1) + return 0; + if (s1->s1 != s2->s1 || s1->s1->npred != 2) + return 0; + + assert(s1->s1 != ifb); + *pthenb = s1; + *pelseb = s2; + *pjoinb = s1->s1; + return 1; +} + +typedef struct Jmp Jmp; + +struct Jmp { + int type; + Ref arg; + Blk *s1, *s2; +}; + +static int +jmpeq(Jmp *a, Jmp *b) +{ + return a->type == b->type && req(a->arg, b->arg) + && a->s1 == b->s1 && a->s2 == b->s2; +} + +static int +jmpnophi(Jmp *j) +{ + if (j->s1 && j->s1->phi) + return 0; + if (j->s2 && j->s2->phi) + return 0; + return 1; +} + +/* require cfg rpo, breaks use */ +void +simplcfg(Fn *fn) +{ + Ins cpy, *i; + Blk *b, *bb, **pb; + Jmp *jmp, *j, *jj; + Phi *p; + int *empty, done; + uint n; + + if (debug['C']) { + fprintf(stderr, "\n> Before CFG simplification:\n"); + printfn(fn, stderr); + } + + cpy = (Ins){.op = Ocopy}; + for (b=fn->start; b; b=b->link) + if (b->npred == 1) { + bb = b->pred[0]; + for (p=b->phi; p; p=p->link) { + cpy.cls = p->cls; + cpy.to = p->to; + cpy.arg[0] = phiarg(p, bb); + addins(&bb->ins, &bb->nins, &cpy); + } + 
b->phi = 0; + } + + jmp = emalloc(fn->nblk * sizeof jmp[0]); + empty = emalloc(fn->nblk * sizeof empty[0]); + for (b=fn->start; b; b=b->link) { + jmp[b->id].type = b->jmp.type; + jmp[b->id].arg = b->jmp.arg; + jmp[b->id].s1 = b->s1; + jmp[b->id].s2 = b->s2; + empty[b->id] = !b->phi; + for (i=b->ins; i<&b->ins[b->nins]; i++) + if (i->op != Onop && i->op != Odbgloc) { + empty[b->id] = 0; + break; + } + } + + do { + done = 1; + for (b=fn->start; b; b=b->link) { + if (b->id == -1u) + continue; + j = &jmp[b->id]; + if (j->type == Jjmp && j->s1->npred == 1) { + assert(!j->s1->phi); + addbins(&b->ins, &b->nins, j->s1); + empty[b->id] &= empty[j->s1->id]; + jj = &jmp[j->s1->id]; + pb = (Blk*[]){jj->s1, jj->s2, 0}; + for (; (bb=*pb); pb++) + for (p=bb->phi; p; p=p->link) { + n = phiargn(p, j->s1); + p->blk[n] = b; + } + j->s1->id = -1u; + *j = *jj; + done = 0; + } + else if (j->type == Jjnz + && empty[j->s1->id] && empty[j->s2->id] + && jmpeq(&jmp[j->s1->id], &jmp[j->s2->id]) + && jmpnophi(&jmp[j->s1->id])) { + *j = jmp[j->s1->id]; + done = 0; + } + } + } while (!done); + + for (b=fn->start; b; b=b->link) + if (b->id != -1u) { + j = &jmp[b->id]; + b->jmp.type = j->type; + b->jmp.arg = j->arg; + b->s1 = j->s1; + b->s2 = j->s2; + assert(!j->s1 || j->s1->id != -1u); + assert(!j->s2 || j->s2->id != -1u); + } + + fillcfg(fn); + free(empty); + free(jmp); + + if (debug['C']) { + fprintf(stderr, "\n> After CFG simplification:\n"); + printfn(fn, stderr); + } +} diff --git a/src/qbe/copy.c b/src/qbe/copy.c new file mode 100644 index 00000000..43b9490d --- /dev/null +++ b/src/qbe/copy.c @@ -0,0 +1,408 @@ +#include "all.h" + +typedef struct Ext Ext; + +struct Ext { + char zext; + char nopw; /* is a no-op if arg width is <= nopw */ + char usew; /* uses only the low usew bits of arg */ +}; + +static int +ext(Ins *i, Ext *e) +{ + static Ext tbl[] = { + /*extsb*/ {0, 7, 8}, + /*extub*/ {1, 8, 8}, + /*extsh*/ {0, 15, 16}, + /*extuh*/ {1, 16, 16}, + /*extsw*/ {0, 31, 32}, + /*extuw*/ {1, 32, 
32}, + }; + + if (!isext(i->op)) + return 0; + *e = tbl[i->op - Oextsb]; + return 1; +} + +static int +bitwidth(uint64_t v) +{ + int n; + + n = 0; + if (v >> 32) { n += 32; v >>= 32; } + if (v >> 16) { n += 16; v >>= 16; } + if (v >> 8) { n += 8; v >>= 8; } + if (v >> 4) { n += 4; v >>= 4; } + if (v >> 2) { n += 2; v >>= 2; } + if (v >> 1) { n += 1; v >>= 1; } + return n+v; +} + +/* no more than w bits are used */ +static int +usewidthle(Fn *fn, Ref r, int w) +{ + Ext e; + Tmp *t; + Use *u; + Phi *p; + Ins *i; + Ref rc; + int64_t v; + int b; + + assert(rtype(r) == RTmp); + t = &fn->tmp[r.val]; + for (u=t->use; u<&t->use[t->nuse]; u++) { + switch (u->type) { + case UPhi: + p = u->u.phi; + /* during gvn, phi nodes may be + * replaced by other temps; in + * this case, the replaced phi + * uses are added to the + * replacement temp uses and + * Phi.to is set to R */ + if (p->visit || req(p->to, R)) + continue; + p->visit = 1; + b = usewidthle(fn, p->to, w); + p->visit = 0; + if (b) + continue; + break; + case UIns: + i = u->u.ins; + assert(i != 0); + if (i->op == Ocopy) + if (usewidthle(fn, i->to, w)) + continue; + if (ext(i, &e)) { + if (e.usew <= w) + continue; + if (usewidthle(fn, i->to, w)) + continue; + } + if (i->op == Oand) { + if (req(r, i->arg[0])) + rc = i->arg[1]; + else { + assert(req(r, i->arg[1])); + rc = i->arg[0]; + } + if (isconbits(fn, rc, &v) + && bitwidth(v) <= w) + continue; + break; + } + break; + default: + break; + } + return 0; + } + return 1; +} + +static int +min(int v1, int v2) +{ + return v1 < v2 ? 
v1 : v2; +} + +/* is the ref narrower than w bits */ +static int +defwidthle(Fn *fn, Ref r, int w) +{ + Ext e; + Tmp *t; + Phi *p; + Ins *i; + uint n; + int64_t v; + int x; + + if (isconbits(fn, r, &v) + && bitwidth(v) <= w) + return 1; + if (rtype(r) != RTmp) + return 0; + t = &fn->tmp[r.val]; + if (t->cls != Kw) + return 0; + + if (!t->def) { + /* phi def */ + for (p=fn->rpo[t->bid]->phi; p; p=p->link) + if (req(p->to, r)) + break; + assert(p); + if (p->visit) + return 1; + p->visit = 1; + for (n=0; nnarg; n++) + if (!defwidthle(fn, p->arg[n], w)) { + p->visit = 0; + return 0; + } + p->visit = 0; + return 1; + } + + i = t->def; + if (i->op == Ocopy) + return defwidthle(fn, i->arg[0], w); + if (i->op == Oshr || i->op == Osar) { + if (isconbits(fn, i->arg[1], &v)) + if (0 < v && v <= 32) { + if (i->op == Oshr && w+v >= 32) + return 1; + if (w < 32) { + if (i->op == Osar) + w = min(31, w+v); + else + w = min(32, w+v); + } + } + return defwidthle(fn, i->arg[0], w); + } + if (iscmp(i->op, &x, &x)) + return w >= 1; + if (i->op == Oand) { + if (defwidthle(fn, i->arg[0], w) + || defwidthle(fn, i->arg[1], w)) + return 1; + return 0; + } + if (i->op == Oor || i->op == Oxor) { + if (defwidthle(fn, i->arg[0], w) + && defwidthle(fn, i->arg[1], w)) + return 1; + return 0; + } + if (ext(i, &e)) { + if (e.zext && e.usew <= w) + return 1; + w = min(w, e.nopw); + return defwidthle(fn, i->arg[0], w); + } + + return 0; +} + +static int +isw1(Fn *fn, Ref r) +{ + return defwidthle(fn, r, 1); +} + +/* insert early extub/extuh instructions + * for pars used only narrowly; this + * helps factoring extensions out of + * loops + * + * needs use; breaks use + */ +void +narrowpars(Fn *fn) +{ + Blk *b; + int loop; + Ins ext, *i, *ins; + uint npar, nins; + Ref r; + + /* only useful for functions with loops */ + loop = 0; + for (b=fn->start; b; b=b->link) + if (b->loop > 1) { + loop = 1; + break; + } + if (!loop) + return; + + b = fn->start; + + npar = 0; + for (i=b->ins; i<&b->ins[b->nins]; 
i++) { + if (!ispar(i->op)) + break; + npar++; + } + if (npar == 0) + return; + + nins = b->nins + npar; + ins = vnew(nins, sizeof ins[0], PFn); + icpy(ins, b->ins, npar); + icpy(ins + 2*npar, b->ins+npar, b->nins-npar); + b->ins = ins; + b->nins = nins; + + for (i=b->ins; i<&b->ins[b->nins]; i++) { + if (!ispar(i->op)) + break; + ext = (Ins){.op = Onop}; + if (i->cls == Kw) + if (usewidthle(fn, i->to, 16)) { + ext.op = Oextuh; + if (usewidthle(fn, i->to, 8)) + ext.op = Oextub; + r = newtmp("vw", i->cls, fn); + ext.cls = i->cls; + ext.to = i->to; + ext.arg[0] = r; + i->to = r; + } + *(i+npar) = ext; + } +} + +Ref +copyref(Fn *fn, Blk *b, Ins *i) +{ + /* which extensions are copies for a given + * argument width */ + static bits extcpy[] = { + [WFull] = 0, + [Wsb] = BIT(Wsb) | BIT(Wsh) | BIT(Wsw), + [Wub] = BIT(Wub) | BIT(Wuh) | BIT(Wuw), + [Wsh] = BIT(Wsh) | BIT(Wsw), + [Wuh] = BIT(Wuh) | BIT(Wuw), + [Wsw] = BIT(Wsw), + [Wuw] = BIT(Wuw), + }; + Ext e; + Tmp *t; + int64_t v; + int w, z; + + if (i->op == Ocopy) + return i->arg[0]; + + /* op identity value */ + if (optab[i->op].hasid + && KBASE(i->cls) == 0 /* integer only - fp NaN! 
*/ + && req(i->arg[1], con01[optab[i->op].idval]) + && (!optab[i->op].cmpeqwl || isw1(fn, i->arg[0]))) + return i->arg[0]; + + /* idempotent op with identical args */ + if (optab[i->op].idemp + && req(i->arg[0], i->arg[1])) + return i->arg[0]; + + /* integer cmp with identical args */ + if ((optab[i->op].cmpeqwl || optab[i->op].cmplgtewl) + && req(i->arg[0], i->arg[1])) + return con01[optab[i->op].eqval]; + + /* cmpeq/ne 0 with 0/non-0 inference */ + if (optab[i->op].cmpeqwl + && req(i->arg[1], CON_Z) + && zeroval(fn, b, i->arg[0], argcls(i, 0), &z)) + return con01[optab[i->op].eqval^z^1]; + + /* redundant and mask */ + if (i->op == Oand + && isconbits(fn, i->arg[1], &v) + && (v > 0 && ((v+1) & v) == 0) + && defwidthle(fn, i->arg[0], bitwidth(v))) + return i->arg[0]; + + if (i->cls == Kw + && (i->op == Oextsw || i->op == Oextuw)) + return i->arg[0]; + + if (ext(i, &e) && rtype(i->arg[0]) == RTmp) { + t = &fn->tmp[i->arg[0].val]; + assert(KBASE(t->cls) == 0); + + /* do not break typing by returning + * a narrower temp */ + if (KWIDE(i->cls) > KWIDE(t->cls)) + return R; + + w = Wsb + (i->op - Oextsb); + if (BIT(w) & extcpy[t->width]) + return i->arg[0]; + + /* avoid eliding extensions of params + * inserted in the start block; their + * point is to make further extensions + * redundant */ + if ((!t->def || !ispar(t->def->op)) + && usewidthle(fn, i->to, e.usew)) + return i->arg[0]; + + if (defwidthle(fn, i->arg[0], e.nopw)) + return i->arg[0]; + } + + return R; +} + +static int +phieq(Phi *pa, Phi *pb) +{ + Ref r; + uint n; + + assert(pa->narg == pb->narg); + for (n=0; nnarg; n++) { + r = phiarg(pb, pa->blk[n]); + if (!req(pa->arg[n], r)) + return 0; + } + return 1; +} + +Ref +phicopyref(Fn *fn, Blk *b, Phi *p) +{ + Blk *d, **s; + Phi *p1; + uint n, c; + + /* identical args */ + for (n=0; nnarg-1; n++) + if (!req(p->arg[n], p->arg[n+1])) + break; + if (n == p->narg-1) + return p->arg[n]; + + /* same as a previous phi */ + for (p1=b->phi; p1!=p; p1=p1->link) { + 
assert(p1); + if (phieq(p1, p)) + return p1->to; + } + + /* can be replaced by a + * dominating jnz arg */ + d = b->idom; + if (p->narg != 2 + || d->jmp.type != Jjnz + || !isw1(fn, d->jmp.arg)) + return R; + + s = (Blk*[]){0, 0}; + for (n=0; n<2; n++) + for (c=0; c<2; c++) + if (req(p->arg[n], con01[c])) + s[c] = p->blk[n]; + + /* if s1 ends with a jnz on either b + * or s2; the inference below is wrong + * without the jump type checks */ + if (d->s1 == s[1] && d->s2 == s[0] + && d->s1->jmp.type == Jjmp + && d->s2->jmp.type == Jjmp) + return d->jmp.arg; + + return R; +} diff --git a/src/qbe/doc/abi.txt b/src/qbe/doc/abi.txt new file mode 100644 index 00000000..5f28d0ee --- /dev/null +++ b/src/qbe/doc/abi.txt @@ -0,0 +1,141 @@ + ================== + System V ABI AMD64 + ================== + + +This document describes concisely the subset of the amd64 +ABI as it is implemented in QBE. The subset can handle +correctly arbitrary standard C-like structs containing +float and integer types. Structs that have unaligned +members are also supported through opaque types, see +the IL description document for more information about +them. + + +- ABI Subset Implemented +------------------------ + +Data classes of interest as defined by the ABI: + * INTEGER + * SSE + * MEMORY + + +~ Classification + + 1. The size of each argument gets rounded up to eightbytes. + (It keeps the stack always 8 bytes aligned.) + 2. _Bool, char, short, int, long, long long and pointers + are in the INTEGER class. In the context of QBE, it + means that 'l' and 'w' are in the INTEGER class. + 3. float and double are in the SSE class. In the context + of QBE, it means that 's' and 'd' are in the SSE class. + 4. If the size of an object is larger than two eightbytes + or if contains unaligned fields, it has class MEMORY. + In the context of QBE, those are big aggregate types + and opaque types. + 5. 
Otherwise, recursively classify fields and determine + the class of the two eightbytes using the classes of + their components. If any is INTEGER the result is + INTEGER, otherwise the result is SSE. + +~ Passing + + * Classify arguments in order. + * INTEGER arguments use in order `%rdi` `%rsi` `%rdx` + `%rcx` `%r8` `%r9`. + * SSE arguments use in order `%xmm0` - `%xmm7`. + * MEMORY gets passed on the stack. They are "pushed" + in the right-to-left order, so from the callee's + point of view, the left-most argument appears first + on the stack. + * When we run out of registers for an aggregate, revert + the assignment for the first eightbytes and pass it + on the stack. + * When all registers are taken, write arguments on the + stack from right to left. + * When calling a variadic function, %al stores the number + of vector registers used to pass arguments (it must be + an upper bound and does not have to be exact). + * Registers `%rbx`, `%r12` - `%r15` are callee-save. + +~ Returning + + * Classify the return type. + * Use `%rax` and `%rdx` in order for INTEGER return + values. + * Use `%xmm0` and `%xmm1` in order for SSE return values. + * If the return value's class is MEMORY, the first + argument of the function `%rdi` was a pointer to an + area big enough to fit the return value. The function + writes the return value there and returns the address + (that was in `%rdi`) in `%rax`. + + +- Alignment on the Stack +------------------------ + +The ABI is unclear on the alignment requirement of the +stack. What must be ensured is that, right before +executing a 'call' instruction, the stack pointer `%rsp` +is aligned on 16 bytes. On entry of the called +function, the stack pointer is 8 modulo 16. Since most +functions will have a prelude pushing `%rbp`, the frame +pointer, upon entry of the body code of the function is +also aligned on 16 bytes (== 0 mod 16). + +Here is a diagram of the stack layout after a call from +g() to f(). 
+ + | | + | g() locals | + +-------------+ + ^ | | \ + | | stack arg 2 | ' + | |xxxxxxxxxxxxx| | f()'s MEMORY + growing | +-------------+ | arguments + addresses | | stack arg 1 | , + | |xxxxxxxxxxxxx| / + | +-------------+ -> 0 mod 16 + | | ret addr | + +-------------+ + | saved %rbp | + +-------------+ -> f()'s %rbp + | f() locals | 0 mod 16 + | ... | + -> %rsp + +Legend: + * `xxxxx` Optional padding. + + +- Remarks +--------- + + * A struct can be returned in registers in one of three + ways. Either `%rax`, `%rdx` are used, or `%xmm0`, + `%xmm1`, or finally `%rax`, `%xmm0`. The last case + happens when a struct is returned with one half + classified as INTEGER and the other as SSE. This + is a consequence of the <@Returning> section above. + + * The size of the arguments area of the stack needs to + be computed first, then arguments are packed starting + from the bottom of the argument area, respecting + alignment constraints. The ABI mentions "pushing" + arguments in right-to-left order, but I think it's a + mistaken view because of the alignment constraints. + + Example: If three 8 bytes MEMORY arguments are passed + to the callee and the caller's stack pointer is 16 bytes + algined, the layout will be like this. + + +-------------+ + |xxxxxxxxxxxxx| padding + | stack arg 3 | + | stack arg 2 | + | stack arg 1 | + +-------------+ -> 0 mod 16 + + The padding must not be at the end of the stack area. + A "pushing" logic would put it at the end. diff --git a/src/qbe/doc/il.txt b/src/qbe/doc/il.txt new file mode 100644 index 00000000..746a7d2f --- /dev/null +++ b/src/qbe/doc/il.txt @@ -0,0 +1,1196 @@ + =========================== + QBE Intermediate Language + =========================== + + + +- Table of Contents +------------------- + + 1. <@ Basic Concepts > + * <@ Input Files > + * <@ BNF Notation > + * <@ Sigils > + * <@ Spacing > + 2. <@ Types > + * <@ Simple Types > + * <@ Subtyping > + 3. <@ Constants and Vals > + 4. <@ Linkage > + 5. 
<@ Definitions > + * <@ Aggregate Types > + * <@ Data > + * <@ Functions > + 6. <@ Control > + * <@ Blocks > + * <@ Jumps > + 7. <@ Instructions > + * <@ Arithmetic and Bits > + * <@ Memory > + * <@ Comparisons > + * <@ Conversions > + * <@ Cast and Copy > + * <@ Call > + * <@ Variadic > + * <@ Phi > + 8. <@ Instructions Index > + +- 1. Basic Concepts +------------------- + +The intermediate language (IL) is a higher-level language +than the machine's assembly language. It smoothes most +of the irregularities of the underlying hardware and +allows an infinite number of temporaries to be used. +This higher abstraction level lets frontend programmers +focus on language design issues. + +~ Input Files +~~~~~~~~~~~~~ + +The intermediate language is provided to QBE as text. +Usually, one file is generated per each compilation unit from +the frontend input language. An IL file is a sequence of +<@ Definitions > for data, functions, and types. Once +processed by QBE, the resulting file can be assembled and +linked using a standard toolchain (e.g., GNU binutils). + +Here is a complete "Hello World" IL file which defines a +function that prints to the screen. Since the string is +not a first class object (only the pointer is) it is +defined outside the function's body. Comments start with +a # character and finish with the end of the line. + + # Define the string constant. + data $str = { b "hello world", b 0 } + + export function w $main() { + @start + # Call the puts function with $str as argument. + %r =w call $puts(l $str) + ret 0 + } + +If you have read the LLVM language reference, you might +recognize the example above. In comparison, QBE makes a +much lighter use of types and the syntax is terser. + +~ BNF Notation +~~~~~~~~~~~~~~ + +The language syntax is vaporously described in the sections +below using BNF syntax. The different BNF constructs used +are listed below. + + * Keywords are enclosed between quotes; + * `... | ...` expresses alternatives; + * `( ... 
)` groups syntax; + * `[ ... ]` marks the nested syntax as optional; + * `( ... ),` designates a comma-separated list of the + enclosed syntax; + * `...*` and `...+` are used for arbitrary and + at-least-once repetition respectively. + +~ Sigils +~~~~~~~~ + +The intermediate language makes heavy use of sigils, all +user-defined names are prefixed with a sigil. This is +to avoid keyword conflicts, and also to quickly spot the +scope and nature of identifiers. + + * `:` is for user-defined <@ Aggregate Types> + * `$` is for globals (represented by a pointer) + * `%` is for function-scope temporaries + * `@` is for block labels + +In this BNF syntax, we use `?IDENT` to designate an identifier +starting with the sigil `?`. + +~ Spacing +~~~~~~~~~ + + `bnf + NL := '\n'+ + +Individual tokens in IL files must be separated by one or +more spacing characters. Both spaces and tabs are recognized +as spacing characters. In data and type definitions, newlines +may also be used as spaces to prevent overly long lines. When +exactly one of two consecutive tokens is a symbol (for example +`,` or `=` or `{`), spacing may be omitted. + +- 2. Types +---------- + +~ Simple Types +~~~~~~~~~~~~~~ + + `bnf + BASETY := 'w' | 'l' | 's' | 'd' # Base types + EXTTY := BASETY | 'b' | 'h' # Extended types + +The IL makes minimal use of types. By design, the types +used are restricted to what is necessary for unambiguous +compilation to machine code and C interfacing. Unlike LLVM, +QBE is not using types as a means to safety; they are only +here for semantic purposes. + +The four base types are `w` (word), `l` (long), `s` (single), +and `d` (double), they stand respectively for 32-bit and +64-bit integers, and 32-bit and 64-bit floating-point numbers. +There are no pointer types available; pointers are typed +by an integer type sufficiently wide to represent all memory +addresses (e.g., `l` on 64-bit architectures). Temporaries +in the IL can only have a base type. 
+ +Extended types contain base types plus `b` (byte) and `h` +(half word), respectively for 8-bit and 16-bit integers. +They are used in <@ Aggregate Types> and <@ Data> definitions. + +For C interfacing, the IL also provides user-defined aggregate +types as well as signed and unsigned variants of the sub-word +extended types. Read more about these types in the +<@ Aggregate Types > and <@ Functions > sections. + +~ Subtyping +~~~~~~~~~~~ + +The IL has a minimal subtyping feature, for integer types only. +Any value of type `l` can be used in a `w` context. In that +case, only the 32 least significant bits of the word value +are used. + +Make note that it is the opposite of the usual subtyping on +integers (in C, we can safely use an `int` where a `long` +is expected). A long value cannot be used in word context. +The rationale is that a word can be signed or unsigned, so +extending it to a long could be done in two ways, either +by zero-extension, or by sign-extension. + +- 3. Constants and Vals +----------------------- + + `bnf + CONST := + ['-'] NUMBER # Decimal integer + | 's_' FP # Single-precision float + | 'd_' FP # Double-precision float + | $IDENT # Global symbol + + DYNCONST := + CONST + | 'thread' $IDENT # Thread-local symbol + + VAL := + DYNCONST + | %IDENT + +Constants come in two kinds: compile-time constants and +dynamic constants. Dynamic constants include compile-time +constants and other symbol variants that are only known at +program-load time or execution time. Consequently, dynamic +constants can only occur in function bodies. + +The representation of integers is two's complement. +Floating-point numbers are represented using the +single-precision and double-precision formats of the +IEEE 754 standard. + +Constants specify a sequence of bits and are untyped. +They are always parsed as 64-bit blobs. Depending on +the context surrounding a constant, only some of its +bits are used. 
For example, in the program below, the +two variables defined have the same value since the first +operand of the subtraction is a word (32-bit) context. + + %x =w sub -1, 0 + %y =w sub 4294967295, 0 + +Because specifying floating-point constants by their bits +makes the code less readable, syntactic sugar is provided +to express them. Standard scientific notation is prefixed +with `s_` and `d_` for single and double precision numbers +respectively. Once again, the following example defines twice +the same double-precision constant. + + %x =d add d_0, d_-1 + %y =d add d_0, -4616189618054758400 + +Global symbols can also be used directly as constants; +they will be resolved and turned into actual numeric +constants by the linker. + +When the `thread` keyword prefixes a symbol name, the +symbol's numeric value is resolved at runtime in the +thread-local storage. + +Vals are used as arguments in regular, phi, and jump +instructions within function definitions. They are +either constants or function-scope temporaries. + +- 4. Linkage +------------ + + `bnf + LINKAGE := + 'export' [NL] + | 'thread' [NL] + | 'section' SECNAME [NL] + | 'section' SECNAME SECFLAGS [NL] + + SECNAME := '"' .... '"' + SECFLAGS := '"' .... '"' + +Function and data definitions (see below) can specify +linkage information to be passed to the assembler and +eventually to the linker. + +The `export` linkage flag marks the defined item as +visible outside the current file's scope. If absent, +the symbol can only be referred to locally. Functions +compiled by QBE and called from C need to be exported. + +The `thread` linkage flag can only qualify data +definitions. It mandates that the object defined is +stored in thread-local storage. Each time a runtime +thread starts, the supporting platform runtime is in +charge of making a new copy of the object for the +fresh thread. 
Objects in thread-local storage must +be accessed using the `thread $IDENT` syntax, as +specified in the <@ Constants and Vals > section. + +A `section` flag can be specified to tell the linker to +put the defined item in a certain section. The use of +the section flag is platform dependent and we refer the +user to the documentation of their assembler and linker +for relevant information. + + section ".init_array" + data $.init.f = { l $f } + +The section flag can be used to add function pointers to +a global initialization list, as depicted above. Note +that some platforms provide a BSS section that can be +used to minimize the footprint of uniformly zeroed data. +When this section is available, QBE will automatically +make use of it and no section flag is required. + +The section and export linkage flags should each appear +at most once in a definition. If multiple occurrences +are present, QBE is free to use any. + +- 5. Definitions +---------------- + +Definitions are the essential components of an IL file. +They can define three types of objects: aggregate types, +data, and functions. Aggregate types are never exported +and do not compile to any code. Data and function +definitions have file scope and are mutually recursive +(even across IL files). Their visibility can be controlled +using linkage flags. + +~ Aggregate Types +~~~~~~~~~~~~~~~~~ + + `bnf + TYPEDEF := + # Regular type + 'type' :IDENT '=' ['align' NUMBER] + '{' + ( SUBTY [NUMBER] ), + '}' + | # Union type + 'type' :IDENT '=' ['align' NUMBER] + '{' + ( + '{' + ( SUBTY [NUMBER] ), + '}' + )+ + '}' + | # Opaque type + 'type' :IDENT '=' 'align' NUMBER '{' NUMBER '}' + + SUBTY := EXTTY | :IDENT + +Aggregate type definitions start with the `type` keyword. +They have file scope, but types must be defined before being +referenced. The inner structure of a type is expressed by a +comma-separated list of types enclosed in curly braces. 
+ + type :fourfloats = { s, s, d, d } + +For ease of IL generation, a trailing comma is tolerated by +the parser. In case many items of the same type are +sequenced (like in a C array), the shorter array syntax +can be used. + + type :abyteandmanywords = { b, w 100 } + +By default, the alignment of an aggregate type is the +maximum alignment of its members. The alignment can be +explicitly specified by the programmer. + + type :cryptovector = align 16 { w 4 } + +Union types allow the same chunk of memory to be used with +different layouts. They are defined by enclosing multiple +regular aggregate type bodies in a pair of curly braces. +Size and alignment of union types are set to the maximum size +and alignment of each variation or, in the case of alignment, +can be explicitly specified. + + type :un9 = { { b } { s } } + +Opaque types are used when the inner structure of an +aggregate cannot be specified; the alignment for opaque +types is mandatory. They are defined simply by enclosing +their size between curly braces. + + type :opaque = align 16 { 32 } + +~ Data +~~~~~~ + + `bnf + DATADEF := + LINKAGE* + 'data' $IDENT '=' ['align' NUMBER] + '{' + ( EXTTY DATAITEM+ + | 'z' NUMBER ), + '}' + + DATAITEM := + $IDENT ['+' NUMBER] # Symbol and offset + | '"' ... '"' # String + | CONST # Constant + +Data definitions express objects that will be emitted in the +compiled file. Their visibility and location in the compiled +artifact are controlled with linkage flags described in the +<@ Linkage > section. + +They define a global identifier (starting with the sigil +`$`), that will contain a pointer to the object specified +by the definition. + +Objects are described by a sequence of fields that start with +a type letter. This letter can either be an extended type, +or the `z` letter. If the letter used is an extended type, +the data item following specifies the bits to be stored in +the field. 
When several data items follow a letter, they +initialize multiple fields of the same size. + +The members of a struct will be packed. This means that +padding has to be emitted by the frontend when necessary. +Alignment of the whole data objects can be manually specified, +and when no alignment is provided, the maximum alignment from +the platform is used. + +When the `z` letter is used the number following indicates +the size of the field; the contents of the field are zero +initialized. It can be used to add padding between fields +or zero-initialize big arrays. + +Here are various examples of data definitions. + + # Three 32-bit values 1, 2, and 3 + # followed by a 0 byte. + data $a = { w 1 2 3, b 0 } + + # A thousand bytes 0 initialized. + data $b = { z 1000 } + + # An object containing two 64-bit + # fields, one with all bits sets and the + # other containing a pointer to the + # object itself. + data $c = { l -1, l $c } + +~ Functions +~~~~~~~~~~~ + + `bnf + FUNCDEF := + LINKAGE* + 'function' [ABITY] $IDENT '(' (PARAM), ')' [NL] + '{' NL + BLOCK+ + '}' + + PARAM := + ABITY %IDENT # Regular parameter + | 'env' %IDENT # Environment parameter (first) + | '...' # Variadic marker (last) + + SUBWTY := 'sb' | 'ub' | 'sh' | 'uh' # Sub-word types + ABITY := BASETY | SUBWTY | :IDENT + +Function definitions contain the actual code to emit in +the compiled file. They define a global symbol that +contains a pointer to the function code. This pointer +can be used in `call` instructions or stored in memory. + +The type given right before the function name is the +return type of the function. All return values of this +function must have this return type. If the return +type is missing, the function must not return any value. + +The parameter list is a comma separated list of distinct +temporary names prefixed by types. The types are used +to correctly implement C compatibility. When an argument +has an aggregate type, a pointer to the aggregate is passed +by the caller. 
In the example below, we have to use a load +instruction to get the value of the first (and only) +member of the struct. + + type :one = { w } + + function w $getone(:one %p) { + @start + %val =w loadw %p + ret %val + } + +If a function accepts or returns values that are smaller +than a word, such as `signed char` or `unsigned short` in C, +one of the sub-word type must be used. The sub-word types +`sb`, `ub`, `sh`, and `uh` stand, respectively, for signed +and unsigned 8-bit values, and signed and unsigned 16-bit +values. Parameters associated with a sub-word type of bit +width N only have their N least significant bits set and +have base type `w`. For example, the function + + function w $addbyte(w %a, sb %b) { + @start + %bw =w extsb %b + %val =w add %a, %bw + ret %val + } + +needs to sign-extend its second argument before the +addition. Dually, return values with sub-word types do +not need to be sign or zero extended. + +If the parameter list ends with `...`, the function is +a variadic function: it can accept a variable number of +arguments. To access the extra arguments provided by +the caller, use the `vastart` and `vaarg` instructions +described in the <@ Variadic > section. + +Optionally, the parameter list can start with an +environment parameter `env %e`. This special parameter is +a 64-bit integer temporary (i.e., of type `l`). If the +function does not use its environment parameter, callers +can safely omit it. This parameter is invisible to a C +caller: for example, the function + + export function w $add(env %e, w %a, w %b) { + @start + %c =w add %a, %b + ret %c + } + +must be given the C prototype `int add(int, int)`. +The intended use of this feature is to pass the +environment pointer of closures while retaining a +very good compatibility with C. The <@ Call > section +explains how to pass an environment parameter. 
+ +Since global symbols are defined mutually recursive, +there is no need for function declarations: a function +can be referenced before its definition. +Similarly, functions from other modules can be used +without previous declaration. All the type information +necessary to compile a call is in the instruction itself. + +The syntax and semantics for the body of functions +are described in the <@ Control > section. + +- 6. Control +------------ + +The IL represents programs as textual transcriptions of +control flow graphs. The control flow is serialized as +a sequence of blocks of straight-line code which are +connected using jump instructions. + +~ Blocks +~~~~~~~~ + + `bnf + BLOCK := + @IDENT NL # Block label + ( PHI NL )* # Phi instructions + ( INST NL )* # Regular instructions + JUMP NL # Jump or return + +All blocks have a name that is specified by a label at +their beginning. Then follows a sequence of instructions +that have "fall-through" flow. Finally one jump terminates +the block. The jump can either transfer control to another +block of the same function or return; jumps are described +further below. + +The first block in a function must not be the target of +any jump in the program. If a jump to the function start +is needed, the frontend must insert an empty prelude block +at the beginning of the function. + +When one block jumps to the next block in the IL file, +it is not necessary to write the jump instruction, it +will be automatically added by the parser. For example +the start block in the example below jumps directly +to the loop block. + + function $loop() { + @start + @loop + %x =w phi @start 100, @loop %x1 + %x1 =w sub %x, 1 + jnz %x1, @loop, @end + @end + ret + } + +~ Jumps +~~~~~~~ + + `bnf + JUMP := + 'jmp' @IDENT # Unconditional + | 'jnz' VAL, @IDENT, @IDENT # Conditional + | 'ret' [VAL] # Return + | 'hlt' # Termination + +A jump instruction ends every block and transfers the +control to another program location. 
The target of +a jump must never be the first block in a function. +The three kinds of jumps available are described in +the following list. + + 1. Unconditional jump. + + Simply jumps to another block of the same function. + + 2. Conditional jump. + + When its word argument is non-zero, it jumps to its + first label argument; otherwise it jumps to the other + label. The argument must be of word type; because of + subtyping a long argument can be passed, but only its + least significant 32 bits will be compared to 0. + + 3. Function return. + + Terminates the execution of the current function, + optionally returning a value to the caller. The value + returned must be of the type given in the function + prototype. If the function prototype does not specify + a return type, no return value can be used. + + 4. Program termination. + + Terminates the execution of the program with a + target-dependent error. This instruction can be used + when it is expected that the execution never reaches + the end of the block it closes; for example, after + having called a function such as `exit()`. + +- 7. Instructions +----------------- + +Instructions are the smallest piece of code in the IL, they +form the body of <@ Blocks >. The IL uses a three-address +code, which means that one instruction computes an operation +between two operands and assigns the result to a third one. + +An instruction has both a name and a return type, this +return type is a base type that defines the size of the +instruction's result. The type of the arguments can be +unambiguously inferred using the instruction name and the +return type. For example, for all arithmetic instructions, +the type of the arguments is the same as the return type. +The two additions below are valid if `%y` is a word or a long +(because of <@ Subtyping >). + + %x =w add 0, %y + %z =w add %x, %x + +Some instructions, like comparisons and memory loads +have operand types that differ from their return types. 
+For instance, two floating points can be compared to give a +word result (1 if the comparison succeeds, 0 if it fails). + + %c =w cgts %a, %b + +In the example above, both operands have to have single type. +This is made explicit by the instruction suffix. + +The types of instructions are described below using a short +type string. A type string specifies all the valid return +types an instruction can have, its arity, and the type of +its arguments depending on its return type. + +Type strings begin with acceptable return types, then +follows, in parentheses, the possible types for the arguments. +If the N-th return type of the type string is used for an +instruction, the arguments must use the N-th type listed for +them in the type string. When an instruction does not have a +return type, the type string only contains the types of the +arguments. + +The following abbreviations are used. + + * `T` stands for `wlsd` + * `I` stands for `wl` + * `F` stands for `sd` + * `m` stands for the type of pointers on the target; on + 64-bit architectures it is the same as `l` + +For example, consider the type string `wl(F)`, it mentions +that the instruction has only one argument and that if the +return type used is long, the argument must be of type double. + +~ Arithmetic and Bits +~~~~~~~~~~~~~~~~~~~~~ + + * `add`, `sub`, `div`, `mul` -- `T(T,T)` + * `neg` -- `T(T)` + * `udiv`, `rem`, `urem` -- `I(I,I)` + * `or`, `xor`, `and` -- `I(I,I)` + * `sar`, `shr`, `shl` -- `I(I,ww)` + +The base arithmetic instructions in the first bullet are +available for all types, integers and floating points. + +When `div` is used with word or long return type, the +arguments are treated as signed. The unsigned integral +division is available as `udiv` instruction. When the +result of a division is not an integer, it is truncated +towards zero. + +The signed and unsigned remainder operations are available +as `rem` and `urem`. The sign of the remainder is the same +as the one of the dividend. 
Its magnitude is smaller than +the divisor one. These two instructions and `udiv` are only +available with integer arguments and result. + +Bitwise OR, AND, and XOR operations are available for both +integer types. Logical operations of typical programming +languages can be implemented using <@ Comparisons > and +<@ Jumps >. + +Shift instructions `sar`, `shr`, and `shl`, shift right or +left their first operand by the amount from the second +operand. The shifting amount is taken modulo the size of +the result type. Shifting right can either preserve the +sign of the value (using `sar`), or fill the newly freed +bits with zeroes (using `shr`). Shifting left always +fills the freed bits with zeroes. + +Remark that an arithmetic shift right (`sar`) is only +equivalent to a division by a power of two for non-negative +numbers. This is because the shift right "truncates" +towards minus infinity, while the division truncates +towards zero. + +~ Memory +~~~~~~~~ + + * Store instructions. + + * `stored` -- `(d,m)` + * `stores` -- `(s,m)` + * `storel` -- `(l,m)` + * `storew` -- `(w,m)` + * `storeh` -- `(w,m)` + * `storeb` -- `(w,m)` + + Store instructions exist to store a value of any base type + and any extended type. Since halfwords and bytes are not + first class in the IL, `storeh` and `storeb` take a word + as argument. Only the first 16 or 8 bits of this word will + be stored in memory at the address specified in the second + argument. + + * Load instructions. + + * `loadd` -- `d(m)` + * `loads` -- `s(m)` + * `loadl` -- `l(m)` + * `loadsw`, `loaduw` -- `I(mm)` + * `loadsh`, `loaduh` -- `I(mm)` + * `loadsb`, `loadub` -- `I(mm)` + + For types smaller than long, two variants of the load + instruction are available: one will sign extend the loaded + value, while the other will zero extend it. Note that + all loads smaller than long can load to either a long or + a word. 
+ + The two instructions `loadsw` and `loaduw` have the same + effect when they are used to define a word temporary. + A `loadw` instruction is provided as syntactic sugar for + `loadsw` to make explicit that the extension mechanism + used is irrelevant. + + * Blits. + + * `blit` -- `(m,m,w)` + + The blit instruction copies in-memory data from its + first address argument to its second address argument. + The third argument is the number of bytes to copy. The + source and destination spans are required to be either + non-overlapping, or fully overlapping (source address + identical to the destination address). The byte count + argument must be a nonnegative numeric constant; it + cannot be a temporary. + + One blit instruction may generate a number of + instructions proportional to its byte count argument, + consequently, it is recommended to keep this argument + relatively small. If large copies are necessary, it is + preferable that frontends generate calls to a supporting + `memcpy` function. + + * Stack allocation. + + * `alloc4` -- `m(l)` + * `alloc8` -- `m(l)` + * `alloc16` -- `m(l)` + + These instructions allocate a chunk of memory on the + stack. The number ending the instruction name is the + alignment required for the allocated slot. QBE will + make sure that the returned address is a multiple of + that alignment value. + + Stack allocation instructions are used, for example, + when compiling the C local variables, because their + address can be taken. When compiling Fortran, + temporaries can be used directly instead, because + it is illegal to take the address of a variable. + +The following example makes use of some of the memory +instructions. Pointers are stored in long temporaries. 
+ + %A0 =l alloc4 8 # stack allocate an array A of 2 words + %A1 =l add %A0, 4 + storew 43, %A0 # A[0] <- 43 + storew 255, %A1 # A[1] <- 255 + %v1 =w loadw %A0 # %v1 <- A[0] as word + %v2 =w loadsb %A1 # %v2 <- A[1] as signed byte + %v3 =w add %v1, %v2 # %v3 is 42 here + +~ Comparisons +~~~~~~~~~~~~~ + +Comparison instructions return an integer value (either a word +or a long), and compare values of arbitrary types. The returned +value is 1 if the two operands satisfy the comparison +relation, or 0 otherwise. The names of comparisons respect +a standard naming scheme in three parts. + + 1. All comparisons start with the letter `c`. + + 2. Then comes a comparison type. The following + types are available for integer comparisons: + + * `eq` for equality + * `ne` for inequality + * `sle` for signed lower or equal + * `slt` for signed lower + * `sge` for signed greater or equal + * `sgt` for signed greater + * `ule` for unsigned lower or equal + * `ult` for unsigned lower + * `uge` for unsigned greater or equal + * `ugt` for unsigned greater + + Floating point comparisons use one of these types: + + * `eq` for equality + * `ne` for inequality + * `le` for lower or equal + * `lt` for lower + * `ge` for greater or equal + * `gt` for greater + * `o` for ordered (no operand is a NaN) + * `uo` for unordered (at least one operand is a NaN) + + Because floating point types always have a sign bit, + all the comparisons available are signed. + + 3. Finally, the instruction name is terminated with a + basic type suffix precising the type of the operands + to be compared. + +For example, `cod` (`I(dd,dd)`) compares two double-precision +floating point numbers and returns 1 if the two floating points +are not NaNs, or 0 otherwise. The `csltw` (`I(ww,ww)`) +instruction compares two words representing signed numbers and +returns 1 when the first argument is smaller than the second one. 
+ +~ Conversions +~~~~~~~~~~~~~ + +Conversion operations change the representation of a value, +possibly modifying it if the target type cannot hold the value +of the source type. Conversions can extend the precision of a +temporary (e.g., from signed 8-bit to 32-bit), or convert a +floating point into an integer and vice versa. + + * `extsw`, `extuw` -- `l(w)` + * `extsh`, `extuh` -- `I(ww)` + * `extsb`, `extub` -- `I(ww)` + * `exts` -- `d(s)` + * `truncd` -- `s(d)` + * `stosi` -- `I(ss)` + * `stoui` -- `I(ss)` + * `dtosi` -- `I(dd)` + * `dtoui` -- `I(dd)` + * `swtof` -- `F(ww)` + * `uwtof` -- `F(ww)` + * `sltof` -- `F(ll)` + * `ultof` -- `F(ll)` + +Extending the precision of a temporary is done using the +`ext` family of instructions. Because QBE types do not +specify the signedness (like in LLVM), extension instructions +exist to sign-extend and zero-extend a value. For example, +`extsb` takes a word argument and sign-extends the 8 +least-significant bits to a full word or long, depending on +the return type. + +The instructions `exts` (extend single) and `truncd` (truncate +double) are provided to change the precision of a floating +point value. When the double argument of `truncd` cannot +be represented as a single-precision floating point, it is +truncated towards zero. + +Converting between signed integers and floating points is done +using `stosi` (single to signed integer), `stoui` (single to +unsigned integer), `dtosi` (double to signed integer), `dtoui` +(double to unsigned integer), `swtof` (signed word to float), +`uwtof` (unsigned word to float), `sltof` (signed long to +float) and `ultof` (unsigned long to float). + +Because of <@ Subtyping >, there is no need to have an +instruction to lower the precision of an integer temporary. + +~ Cast and Copy +~~~~~~~~~~~~~~~ + +The `cast` and `copy` instructions return the bits of their +argument verbatim. However a `cast` will change an integer +into a floating point of the same width and vice versa. 
+ + * `cast` -- `wlsd(sdwl)` + * `copy` -- `T(T)` + +Casts can be used to make bitwise operations on the +representation of floating point numbers. For example +the following program will compute the opposite of the +single-precision floating point number `%f` into `%rs`. + + %b0 =w cast %f + %b1 =w xor 2147483648, %b0 # flip the msb + %rs =s cast %b1 + +~ Call +~~~~~~ + + `bnf + CALL := [%IDENT '=' ABITY] 'call' VAL '(' (ARG), ')' + + ARG := + ABITY VAL # Regular argument + | 'env' VAL # Environment argument (first) + | '...' # Variadic marker + + SUBWTY := 'sb' | 'ub' | 'sh' | 'uh' # Sub-word types + ABITY := BASETY | SUBWTY | :IDENT + +The call instruction is special in several ways. It is not +a three-address instruction and requires the type of all +its arguments to be given. Also, the return type can be +either a base type or an aggregate type. These specifics +are required to compile calls with C compatibility (i.e., +to respect the ABI). + +When an aggregate type is used as argument type or return +type, the value respectively passed or returned needs to be +a pointer to a memory location holding the value. This is +because aggregate types are not first-class citizens of +the IL. + +Sub-word types are used for arguments and return values +of width less than a word. Details on these types are +presented in the <@ Functions > section. Arguments with +sub-word types need not be sign or zero extended according +to their type. Calls with a sub-word return type define +a temporary of base type `w` with its most significant bits +unspecified. + +Unless the called function does not return a value, a +return temporary must be specified, even if it is never +used afterwards. + +An environment parameter can be passed as first argument +using the `env` keyword. The passed value must be a 64-bit +integer. If the called function does not expect an environment +parameter, it will be safely discarded. 
See the <@ Functions > +section for more information about environment parameters. + +When the called function is variadic, there must be a `...` +marker separating the named and variadic arguments. + +~ Variadic +~~~~~~~~~~ + +The `vastart` and `vaarg` instructions provide a portable +way to access the extra parameters of a variadic function. + + * `vastart` -- `(m)` + * `vaarg` -- `T(mmmm)` + +The `vastart` instruction initializes a *variable argument +list* used to access the extra parameters of the enclosing +variadic function. It is safe to call it multiple times. + +The `vaarg` instruction fetches the next argument from +a variable argument list. It is currently limited to +fetching arguments that have a base type. This instruction +is essentially effectful: calling it twice in a row will +return two consecutive arguments from the argument list. + +Both instructions take a pointer to a variable argument +list as sole argument. The size and alignment of variable +argument lists depend on the target used. However, it +is possible to conservatively use the maximum size and +alignment required by all the targets. + + type :valist = align 8 { 24 } # For amd64_sysv + type :valist = align 8 { 8 } # For amd64_win + type :valist = align 8 { 32 } # For arm64 + type :valist = align 8 { 8 } # For rv64 + +The following example defines a variadic function adding +its first three arguments. + + function s $add3(s %a, ...) { + @start + %ap =l alloc8 32 + vastart %ap + %r =s call $vadd(s %a, l %ap) + ret %r + } + + function s $vadd(s %a, l %ap) { + @start + %b =s vaarg %ap + %c =s vaarg %ap + %d =s add %a, %b + %e =s add %d, %c + ret %e + } + +~ Phi +~~~~~ + + `bnf + PHI := %IDENT '=' BASETY 'phi' ( @IDENT VAL ), + +First and foremost, phi instructions are NOT necessary when +writing a frontend to QBE. 
One solution to avoid having to +deal with SSA form is to use stack allocated variables for +all source program variables and perform assignments and +lookups using <@ Memory > operations. This is what LLVM +users typically do. + +Another solution is to simply emit code that is not in SSA +form! Contrary to LLVM, QBE is able to fixup programs not +in SSA form without requiring the boilerplate of loading +and storing in memory. For example, the following program +will be correctly compiled by QBE. + + @start + %x =w copy 100 + %s =w copy 0 + @loop + %s =w add %s, %x + %x =w sub %x, 1 + jnz %x, @loop, @end + @end + ret %s + +Now, if you want to know what phi instructions are and how +to use them in QBE, you can read the following. + +Phi instructions are specific to SSA form. In SSA form +values can only be assigned once, without phi instructions, +this requirement is too strong to represent many programs. +For example consider the following C program. + + int f(int x) { + int y; + if (x) + y = 1; + else + y = 2; + return y; + } + +The variable `y` is assigned twice, the solution to +translate it in SSA form is to insert a phi instruction. + + @ifstmt + jnz %x, @ift, @iff + @ift + jmp @retstmt + @iff + jmp @retstmt + @retstmt + %y =w phi @ift 1, @iff 2 + ret %y + +Phi instructions return one of their arguments depending +on where the control came from. In the example, `%y` is +set to 1 if the `@ift` branch is taken, or it is set to +2 otherwise. + +An important remark about phi instructions is that QBE +assumes that if a variable is defined by a phi it respects +all the SSA invariants. So it is critical to not use phi +instructions unless you know exactly what you are doing. + +- 8. 
Instructions Index +----------------------- + + * <@ Arithmetic and Bits >: + + * `add` + * `and` + * `div` + * `mul` + * `neg` + * `or` + * `rem` + * `sar` + * `shl` + * `shr` + * `sub` + * `udiv` + * `urem` + * `xor` + + * <@ Memory >: + + * `alloc16` + * `alloc4` + * `alloc8` + * `blit` + * `loadd` + * `loadl` + * `loads` + * `loadsb` + * `loadsh` + * `loadsw` + * `loadub` + * `loaduh` + * `loaduw` + * `loadw` + * `storeb` + * `stored` + * `storeh` + * `storel` + * `stores` + * `storew` + + * <@ Comparisons >: + + * `ceqd` + * `ceql` + * `ceqs` + * `ceqw` + * `cged` + * `cges` + * `cgtd` + * `cgts` + * `cled` + * `cles` + * `cltd` + * `clts` + * `cned` + * `cnel` + * `cnes` + * `cnew` + * `cod` + * `cos` + * `csgel` + * `csgew` + * `csgtl` + * `csgtw` + * `cslel` + * `cslew` + * `csltl` + * `csltw` + * `cugel` + * `cugew` + * `cugtl` + * `cugtw` + * `culel` + * `culew` + * `cultl` + * `cultw` + * `cuod` + * `cuos` + + * <@ Conversions >: + + * `dtosi` + * `dtoui` + * `exts` + * `extsb` + * `extsh` + * `extsw` + * `extub` + * `extuh` + * `extuw` + * `sltof` + * `ultof` + * `stosi` + * `stoui` + * `swtof` + * `uwtof` + * `truncd` + + * <@ Cast and Copy > : + + * `cast` + * `copy` + + * <@ Call >: + + * `call` + + * <@ Variadic >: + + * `vastart` + * `vaarg` + + * <@ Phi >: + + * `phi` + + * <@ Jumps >: + + * `hlt` + * `jmp` + * `jnz` + * `ret` diff --git a/src/qbe/doc/llvm.txt b/src/qbe/doc/llvm.txt new file mode 100644 index 00000000..a21fc1fc --- /dev/null +++ b/src/qbe/doc/llvm.txt @@ -0,0 +1,98 @@ + =========== + QBE vs LLVM + =========== + + +Both QBE and LLVM are compiler backends using an SSA +representation. This document will explain why LLVM +does not make QBE a redundant project. Obviously, +everything following is biased, because written by me. + +- Scope +------- + +QBE is a much smaller scale project with different goals +than LLVM. + + * QBE is for amateur language designers. 
+ + It does not address all the problems faced when + conceiving an industry-grade language. If you are + toying with some language ideas, using LLVM will + be like hauling your backpack with a truck, but + using QBE will feel more like riding a bicycle. + + * QBE is about the first 70%, not the last 30%. + + It attempts to pinpoint, in the extremely vast + compilation literature, the optimizations that get + you 70% of the performance in 10% of the code of + full blown compilers. + + For example, copy propagation on SSA form is + implemented in 160 lines of code in QBE! + + * QBE is extremely hackable. + + First, it is, and will remain, a small project + (less than 8 kloc). Second, it is programmed in + non-fancy C99 without any dependencies. Third, + it is able to dump the IL and debug information in + a uniform format after each pass. + + On my Core 2 Duo machine, QBE compiles in half a + second (without optimizations). + +- Features +---------- + +LLVM is definitely more packed with features, but there +are a few things provided in QBE to consider. + + * LLVM does NOT provide full C compatibility for you. + + In more technical terms, any language that provides + good C compatibility and uses LLVM as a backend + needs to reimplement large chunks of the ABI in + its frontend! This well known issue in the LLVM + community causes a great deal of duplication + and bugs. + + Implementing a complete C ABI (with struct arguments + and returns) is incredibly tricky, and not really + a lot of fun. QBE provides you with IL operations + to call in (and be called by) C with no pain. + Moreover the ABI implementation in QBE has been + thoroughly tested by fuzzing and manual tests. + + * LLVM IL is more cluttered with memory operations. + + Implementing SSA construction is hard. To save its + users from having to implement it, LLVM provides + stack slots. 
This means that one increment of + a variable `v` will be composed of three LLVM + instructions: one load, one add, and one store. + + QBE provides simple non-SSA temporaries, so + incrementing `v` is simply done with one instruction + `%v =w add %v, 1`. + + This could seem cosmetic, but dividing the size of + the IL by three makes it easier for the frontend + writers to spot bugs in the generated code. + + * LLVM IL is more cluttered with type annotations and + casts. + + For the sake of advanced optimizations and + correctness, LLVM has complex IL types. However, + only a few types are really first class and many + operations of source languages require casts to be + compiled. + + Because QBE makes a much lighter use of types, the + IL is more readable and shorter. It can of course be + argued back that the correctness of QBE is jeopardized, + but remember that, in practice, the large amount + of casts necessary in LLVM IL is undermining the + overall effectiveness of the type system. diff --git a/src/qbe/doc/native_win.txt b/src/qbe/doc/native_win.txt new file mode 100644 index 00000000..bc88f05f --- /dev/null +++ b/src/qbe/doc/native_win.txt @@ -0,0 +1,15 @@ +There is an experimental amd64_win (native Windows ABI and calling +convention). + +In tree, this is currently only tested via cross-compilation from a +Linux host, and using wine to run the tests. + +You'll need something like: + + sudo apt install mingw64-w64 dos2unix wine + +and then + + make check-amd64_win + +should pass. diff --git a/src/qbe/doc/rv64.txt b/src/qbe/doc/rv64.txt new file mode 100644 index 00000000..17f6072e --- /dev/null +++ b/src/qbe/doc/rv64.txt @@ -0,0 +1,20 @@ +========= +RISC-V 64 +========= + +- Known issues +-------------- + +ABI with structs containing floats is not yet supported. + +- Possible improvements +----------------------- + +rv64_isel() could turn compare used only with jnz into b{lt,ge}[u]. 
+ +- Helpful links +--------------- + +RISC-V spec: https://github.com/riscv/riscv-isa-manual/releases/latest/download/riscv-spec.pdf +ASM manual: https://github.com/riscv-non-isa/riscv-asm-manual/blob/master/riscv-asm.md +ABI: https://github.com/riscv-non-isa/riscv-elf-psabi-doc/blob/master/riscv-cc.adoc diff --git a/src/qbe/doc/win.txt b/src/qbe/doc/win.txt new file mode 100644 index 00000000..8d0ca797 --- /dev/null +++ b/src/qbe/doc/win.txt @@ -0,0 +1,23 @@ + =================== + Windows Quick Start + =================== + +Only 64-bit versions of windows are supported. To compile +this software you will need to get a normal UNIX toolchain. +There are several ways to get one, but I will only describe +how I did it. + + 1. Download and install [@1 MSYS2] (the x86_64 version). + 2. In an MSYS2 terminal, run the following command. + + pacman -S git make mingw-w64-x86_64-gcc mingw-w64-x86_64-gdb + + 3. Restart the MSYS2 terminal. + 4. In the new terminal, clone QBE. + + git clone git://c9x.me/qbe.git + + 5. Compile using `make`. + + +[1] http://www.msys2.org diff --git a/src/qbe/emit.c b/src/qbe/emit.c new file mode 100644 index 00000000..4bd67d18 --- /dev/null +++ b/src/qbe/emit.c @@ -0,0 +1,271 @@ +#include "all.h" + +enum { + SecText, + SecData, + SecBss, +}; + +void +emitlnk(char *n, Lnk *l, int s, FILE *f) +{ + static char *sec[2][3] = { + [0][SecText] = ".text", + [0][SecData] = ".data", + [0][SecBss] = ".bss", + [1][SecText] = ".abort \"unreachable\"", + [1][SecData] = ".section .tdata,\"awT\"", + [1][SecBss] = ".section .tbss,\"awT\"", + }; + char *pfx, *sfx; + + pfx = n[0] == '"' ? 
"" : T.assym; + sfx = ""; + if (T.apple && l->thread) { + l->sec = "__DATA"; + l->secf = "__thread_data,thread_local_regular"; + sfx = "$tlv$init"; + fputs( + ".section __DATA,__thread_vars," + "thread_local_variables\n", + f + ); + fprintf(f, "%s%s:\n", pfx, n); + fprintf(f, + "\t.quad __tlv_bootstrap\n" + "\t.quad 0\n" + "\t.quad %s%s%s\n\n", + pfx, n, sfx + ); + } + if (l->sec) { + fprintf(f, ".section %s", l->sec); + if (l->secf) + fprintf(f, ",%s", l->secf); + } else + fputs(sec[l->thread != 0][s], f); + fputc('\n', f); + if (l->align) + fprintf(f, ".balign %d\n", l->align); + if (l->export) + fprintf(f, ".globl %s%s\n", pfx, n); + fprintf(f, "%s%s%s:\n", pfx, n, sfx); +} + +void +emitfnlnk(char *n, Lnk *l, FILE *f) +{ + emitlnk(n, l, SecText, f); +} + +void +emitdat(Dat *d, FILE *f) +{ + static struct { + char decl[8]; + int64_t mask; + } di[] = { + [DB] = {"\t.byte", 0xffL}, + [DH] = {"\t.short", 0xffffL}, + [DW] = {"\t.int", 0xffffffffL}, + [DL] = {"\t.quad", -1L}, + }; + static int64_t zero; + char *p; + + switch (d->type) { + case DStart: + zero = 0; + break; + case DEnd: + if (d->lnk->common) { + if (zero == -1) + die("invalid common data definition"); + p = d->name[0] == '"' ? "" : T.assym; + fprintf(f, ".comm %s%s,%"PRId64, + p, d->name, zero); + if (d->lnk->align) + fprintf(f, ",%d", d->lnk->align); + fputc('\n', f); + } + else if (zero != -1) { + emitlnk(d->name, d->lnk, SecBss, f); + fprintf(f, "\t.fill %"PRId64",1,0\n", zero); + } + break; + case DZ: + if (zero != -1) + zero += d->u.num; + else + fprintf(f, "\t.fill %"PRId64",1,0\n", d->u.num); + break; + default: + if (zero != -1) { + emitlnk(d->name, d->lnk, SecData, f); + if (zero > 0) + fprintf(f, "\t.fill %"PRId64",1,0\n", zero); + zero = -1; + } + if (d->isstr) { + if (d->type != DB) + err("strings only supported for 'b' currently"); + fprintf(f, "\t.ascii %s\n", d->u.str); + } + else if (d->isref) { + p = d->u.ref.name[0] == '"' ? 
"" : T.assym; + fprintf(f, "%s %s%s%+"PRId64"\n", + di[d->type].decl, p, d->u.ref.name, + d->u.ref.off); + } + else { + fprintf(f, "%s %"PRId64"\n", + di[d->type].decl, + d->u.num & di[d->type].mask); + } + break; + } +} + +typedef struct Asmbits Asmbits; + +struct Asmbits { + bits n; + int size; + Asmbits *link; +}; + +static Asmbits *stash; + +int +stashbits(bits n, int size) +{ + Asmbits **pb, *b; + int i; + + assert(size == 4 || size == 8 || size == 16); + for (pb=&stash, i=0; (b=*pb); pb=&b->link, i++) + if (size <= b->size && b->n == n) + return i; + b = emalloc(sizeof *b); + b->n = n; + b->size = size; + b->link = 0; + *pb = b; + return i; +} + +static void +emitfin(FILE *f, char *sec[3]) +{ + Asmbits *b; + int lg, i; + union { int32_t i; float f; } u; + + if (!stash) + return; + fprintf(f, "/* floating point constants */\n"); + for (lg=4; lg>=2; lg--) + for (b=stash, i=0; b; b=b->link, i++) { + if (b->size == (1<n); + else if (lg == 3) + fprintf(f, + "\n\t.quad %"PRId64 + " /* %f */\n\n", + (int64_t)b->n, + *(double *)&b->n); + else if (lg == 2) { + u.i = b->n; + fprintf(f, + "\n\t.int %"PRId32 + " /* %f */\n\n", + u.i, (double)u.f); + } + } + } + while ((b=stash)) { + stash = b->link; + free(b); + } +} + +void +elf_emitfin(FILE *f) +{ + static char *sec[3] = { ".rodata", ".rodata", ".rodata" }; + + emitfin(f ,sec); + fprintf(f, ".section .note.GNU-stack,\"\",@progbits\n"); +} + +void +elf_emitfnfin(char *fn, FILE *f) +{ + fprintf(f, ".type %s, @function\n", fn); + fprintf(f, ".size %s, .-%s\n", fn, fn); +} + +void +macho_emitfin(FILE *f) +{ + static char *sec[3] = { + "__TEXT,__literal4,4byte_literals", + "__TEXT,__literal8,8byte_literals", + ".abort \"unreachable\"", + }; + + emitfin(f, sec); +} + +void +pe_emitfin(FILE *f) +{ + static char *sec[3] = { ".rodata", ".rodata", ".rodata" }; + + emitfin(f, sec); +} + +static uint32_t *file; +static uint nfile; +static uint curfile; + +void +emitdbgfile(char *fn, FILE *f) +{ + uint32_t id; + uint n; + + id = 
intern(fn); + for (n=0; ntype != CBits) + return 0; + if (w) + return (uint64_t)c->bits.i == k; + else + return (uint32_t)c->bits.i == (uint32_t)k; +} + +int +foldint(Con *res, int op, int w, Con *cl, Con *cr) +{ + union { + int64_t s; + uint64_t u; + float fs; + double fd; + } l, r; + uint64_t x; + Sym sym; + int typ; + + memset(&sym, 0, sizeof sym); + typ = CBits; + l.s = cl->bits.i; + r.s = cr->bits.i; + if (op == Oadd) { + if (cl->type == CAddr) { + if (cr->type == CAddr) + return 1; + typ = CAddr; + sym = cl->sym; + } + else if (cr->type == CAddr) { + typ = CAddr; + sym = cr->sym; + } + } + else if (op == Osub) { + if (cl->type == CAddr) { + if (cr->type != CAddr) { + typ = CAddr; + sym = cl->sym; + } else if (!symeq(cl->sym, cr->sym)) + return 1; + } + else if (cr->type == CAddr) + return 1; + } + else if (cl->type == CAddr || cr->type == CAddr) + return 1; + if (op == Odiv || op == Orem || op == Oudiv || op == Ourem) { + if (iscon(cr, w, 0)) + return 1; + if (op == Odiv || op == Orem) { + x = w ? INT64_MIN : INT32_MIN; + if (iscon(cr, w, -1)) + if (iscon(cl, w, x)) + return 1; + } + } + switch (op) { + case Oadd: x = l.u + r.u; break; + case Osub: x = l.u - r.u; break; + case Oneg: x = -l.u; break; + case Odiv: x = w ? l.s / r.s : (int32_t)l.s / (int32_t)r.s; break; + case Orem: x = w ? l.s % r.s : (int32_t)l.s % (int32_t)r.s; break; + case Oudiv: x = w ? l.u / r.u : (uint32_t)l.u / (uint32_t)r.u; break; + case Ourem: x = w ? l.u % r.u : (uint32_t)l.u % (uint32_t)r.u; break; + case Omul: x = l.u * r.u; break; + case Oand: x = l.u & r.u; break; + case Oor: x = l.u | r.u; break; + case Oxor: x = l.u ^ r.u; break; + case Osar: x = (w ? l.s : (int32_t)l.s) >> (r.u & (31|w<<5)); break; + case Oshr: x = (w ? 
l.u : (uint32_t)l.u) >> (r.u & (31|w<<5)); break; + case Oshl: x = l.u << (r.u & (31|w<<5)); break; + case Oextsb: x = (int8_t)l.u; break; + case Oextub: x = (uint8_t)l.u; break; + case Oextsh: x = (int16_t)l.u; break; + case Oextuh: x = (uint16_t)l.u; break; + case Oextsw: x = (int32_t)l.u; break; + case Oextuw: x = (uint32_t)l.u; break; + case Ostosi: x = w ? (int64_t)cl->bits.s : (int32_t)cl->bits.s; break; + case Ostoui: x = w ? (uint64_t)cl->bits.s : (uint32_t)cl->bits.s; break; + case Odtosi: x = w ? (int64_t)cl->bits.d : (int32_t)cl->bits.d; break; + case Odtoui: x = w ? (uint64_t)cl->bits.d : (uint32_t)cl->bits.d; break; + case Ocast: + x = l.u; + if (cl->type == CAddr) { + typ = CAddr; + sym = cl->sym; + } + break; + default: + if (Ocmpw <= op && op <= Ocmpl1) { + if (op <= Ocmpw1) { + l.u = (int32_t)l.u; + r.u = (int32_t)r.u; + } else + op -= Ocmpl - Ocmpw; + switch (op - Ocmpw) { + case Ciule: x = l.u <= r.u; break; + case Ciult: x = l.u < r.u; break; + case Cisle: x = l.s <= r.s; break; + case Cislt: x = l.s < r.s; break; + case Cisgt: x = l.s > r.s; break; + case Cisge: x = l.s >= r.s; break; + case Ciugt: x = l.u > r.u; break; + case Ciuge: x = l.u >= r.u; break; + case Cieq: x = l.u == r.u; break; + case Cine: x = l.u != r.u; break; + default: die("unreachable"); + } + } + else if (Ocmps <= op && op <= Ocmps1) { + switch (op - Ocmps) { + case Cfle: x = l.fs <= r.fs; break; + case Cflt: x = l.fs < r.fs; break; + case Cfgt: x = l.fs > r.fs; break; + case Cfge: x = l.fs >= r.fs; break; + case Cfne: x = l.fs != r.fs; break; + case Cfeq: x = l.fs == r.fs; break; + case Cfo: x = l.fs < r.fs || l.fs >= r.fs; break; + case Cfuo: x = !(l.fs < r.fs || l.fs >= r.fs); break; + default: die("unreachable"); + } + } + else if (Ocmpd <= op && op <= Ocmpd1) { + switch (op - Ocmpd) { + case Cfle: x = l.fd <= r.fd; break; + case Cflt: x = l.fd < r.fd; break; + case Cfgt: x = l.fd > r.fd; break; + case Cfge: x = l.fd >= r.fd; break; + case Cfne: x = l.fd != r.fd; break; 
+ case Cfeq: x = l.fd == r.fd; break; + case Cfo: x = l.fd < r.fd || l.fd >= r.fd; break; + case Cfuo: x = !(l.fd < r.fd || l.fd >= r.fd); break; + default: die("unreachable"); + } + } + else + die("unreachable"); + } + *res = (Con){.type=typ, .sym=sym, .bits={.i=x}}; + return 0; +} + +static void +foldflt(Con *res, int op, int w, Con *cl, Con *cr) +{ + float xs, ls, rs; + double xd, ld, rd; + + if (cl->type != CBits || cr->type != CBits) + err("invalid address operand for '%s'", optab[op].name); + *res = (Con){.type = CBits}; + memset(&res->bits, 0, sizeof(res->bits)); + if (w) { + ld = cl->bits.d; + rd = cr->bits.d; + switch (op) { + case Oadd: xd = ld + rd; break; + case Osub: xd = ld - rd; break; + case Oneg: xd = -ld; break; + case Odiv: xd = ld / rd; break; + case Omul: xd = ld * rd; break; + case Oswtof: xd = (int32_t)cl->bits.i; break; + case Ouwtof: xd = (uint32_t)cl->bits.i; break; + case Osltof: xd = (int64_t)cl->bits.i; break; + case Oultof: xd = (uint64_t)cl->bits.i; break; + case Oexts: xd = cl->bits.s; break; + case Ocast: xd = ld; break; + default: die("unreachable"); + } + res->bits.d = xd; + res->flt = 2; + } else { + ls = cl->bits.s; + rs = cr->bits.s; + switch (op) { + case Oadd: xs = ls + rs; break; + case Osub: xs = ls - rs; break; + case Oneg: xs = -ls; break; + case Odiv: xs = ls / rs; break; + case Omul: xs = ls * rs; break; + case Oswtof: xs = (int32_t)cl->bits.i; break; + case Ouwtof: xs = (uint32_t)cl->bits.i; break; + case Osltof: xs = (int64_t)cl->bits.i; break; + case Oultof: xs = (uint64_t)cl->bits.i; break; + case Otruncd: xs = cl->bits.d; break; + case Ocast: xs = ls; break; + default: die("unreachable"); + } + res->bits.s = xs; + res->flt = 1; + } +} + +static Ref +opfold(int op, int cls, Con *cl, Con *cr, Fn *fn) +{ + Ref r; + Con c; + + if (cls == Kw || cls == Kl) { + if (foldint(&c, op, cls == Kl, cl, cr)) + return R; + } else + foldflt(&c, op, cls == Kd, cl, cr); + if (!KWIDE(cls)) + c.bits.i &= 0xffffffff; + r = newcon(&c, 
fn); + assert(!(cls == Ks || cls == Kd) || c.flt); + return r; +} + +/* used by GVN */ +Ref +foldref(Fn *fn, Ins *i) +{ + Ref rr; + Con *cl, *cr; + + if (rtype(i->to) != RTmp) + return R; + if (optab[i->op].canfold) { + if (rtype(i->arg[0]) != RCon) + return R; + cl = &fn->con[i->arg[0].val]; + rr = i->arg[1]; + if (req(rr, R)) + rr = CON_Z; + if (rtype(rr) != RCon) + return R; + cr = &fn->con[rr.val]; + + return opfold(i->op, i->cls, cl, cr, fn); + } + return R; +} diff --git a/src/qbe/gcm.c b/src/qbe/gcm.c new file mode 100644 index 00000000..e685d2a9 --- /dev/null +++ b/src/qbe/gcm.c @@ -0,0 +1,460 @@ +#include "all.h" + +#define NOBID (-1u) + +static int +isdivwl(Ins *i) +{ + switch (i->op) { + case Odiv: + case Orem: + case Oudiv: + case Ourem: + return KBASE(i->cls) == 0; + default: + return 0; + } +} + +int +pinned(Ins *i) +{ + return optab[i->op].pinned || isdivwl(i); +} + +/* pinned ins that can be eliminated if unused */ +static int +canelim(Ins *i) +{ + return isload(i->op) || isalloc(i->op) || isdivwl(i); +} + +static uint earlyins(Fn *, Blk *, Ins *); + +static uint +schedearly(Fn *fn, Ref r) +{ + Tmp *t; + Blk *b; + + if (rtype(r) != RTmp) + return 0; + + t = &fn->tmp[r.val]; + if (t->gcmbid != NOBID) + return t->gcmbid; + + b = fn->rpo[t->bid]; + if (t->def) { + assert(b->ins <= t->def && t->def < &b->ins[b->nins]); + t->gcmbid = 0; /* mark as visiting */ + t->gcmbid = earlyins(fn, b, t->def); + } else { + /* phis do not move */ + t->gcmbid = t->bid; + } + + return t->gcmbid; +} + +static uint +earlyins(Fn *fn, Blk *b, Ins *i) +{ + uint b0, b1; + + b0 = schedearly(fn, i->arg[0]); + assert(b0 != NOBID); + b1 = schedearly(fn, i->arg[1]); + assert(b1 != NOBID); + if (fn->rpo[b0]->depth < fn->rpo[b1]->depth) { + assert(dom(fn->rpo[b0], fn->rpo[b1])); + b0 = b1; + } + return pinned(i) ? 
b->id : b0; +} + +static void +earlyblk(Fn *fn, uint bid) +{ + Blk *b; + Phi *p; + Ins *i; + uint n; + + b = fn->rpo[bid]; + for (p=b->phi; p; p=p->link) + for (n=0; nnarg; n++) + schedearly(fn, p->arg[n]); + for (i=b->ins; i<&b->ins[b->nins]; i++) + if (pinned(i)) { + schedearly(fn, i->arg[0]); + schedearly(fn, i->arg[1]); + } + schedearly(fn, b->jmp.arg); +} + +/* least common ancestor in dom tree */ +static uint +lcabid(Fn *fn, uint bid1, uint bid2) +{ + Blk *b; + + if (bid1 == NOBID) + return bid2; + if (bid2 == NOBID) + return bid1; + + b = lca(fn->rpo[bid1], fn->rpo[bid2]); + assert(b); + return b->id; +} + +static uint +bestbid(Fn *fn, uint earlybid, uint latebid) +{ + Blk *curb, *earlyb, *bestb; + + if (latebid == NOBID) + return NOBID; /* unused */ + + assert(earlybid != NOBID); + + earlyb = fn->rpo[earlybid]; + bestb = curb = fn->rpo[latebid]; + assert(dom(earlyb, curb)); + + while (curb != earlyb) { + curb = curb->idom; + if (curb->loop < bestb->loop) + bestb = curb; + } + return bestb->id; +} + +static uint lateins(Fn *, Blk *, Ins *, Ref r); +static uint latephi(Fn *, Phi *, Ref r); +static uint latejmp(Blk *, Ref r); + +/* return lca bid of ref uses */ +static uint +schedlate(Fn *fn, Ref r) +{ + Tmp *t; + Blk *b; + Use *u; + uint earlybid; + uint latebid; + uint uselatebid; + + if (rtype(r) != RTmp) + return NOBID; + + t = &fn->tmp[r.val]; + if (t->visit) + return t->gcmbid; + + t->visit = 1; + earlybid = t->gcmbid; + if (earlybid == NOBID) + return NOBID; /* not used */ + + /* reuse gcmbid for late bid */ + t->gcmbid = t->bid; + latebid = NOBID; + for (u=t->use; u<&t->use[t->nuse]; u++) { + assert(u->bid < fn->nblk); + b = fn->rpo[u->bid]; + switch (u->type) { + case UXXX: + die("unreachable"); + break; + case UPhi: + uselatebid = latephi(fn, u->u.phi, r); + break; + case UIns: + uselatebid = lateins(fn, b, u->u.ins, r); + break; + case UJmp: + uselatebid = latejmp(b, r); + break; + } + latebid = lcabid(fn, latebid, uselatebid); + } + /* latebid may 
be NOBID if the temp is used + * in fixed instructions that may be eliminated + * and are themselves unused transitively */ + + if (t->def && !pinned(t->def)) + t->gcmbid = bestbid(fn, earlybid, latebid); + /* else, keep the early one */ + + /* now, gcmbid is the best bid */ + return t->gcmbid; +} + +/* returns lca bid of uses or NOBID if + * the definition can be eliminated */ +static uint +lateins(Fn *fn, Blk *b, Ins *i, Ref r) +{ + uint latebid; + + assert(b->ins <= i && i < &b->ins[b->nins]); + assert(req(i->arg[0], r) || req(i->arg[1], r)); + + latebid = schedlate(fn, i->to); + if (pinned(i)) { + if (latebid == NOBID) + if (canelim(i)) + return NOBID; + return b->id; + } + + return latebid; +} + +static uint +latephi(Fn *fn, Phi *p, Ref r) +{ + uint n; + uint latebid; + + if (!p->narg) + return NOBID; /* marked as unused */ + + latebid = NOBID; + for (n = 0; n < p->narg; n++) + if (req(p->arg[n], r)) + latebid = lcabid(fn, latebid, p->blk[n]->id); + + assert(latebid != NOBID); + return latebid; +} + +static uint +latejmp(Blk *b, Ref r) +{ + if (req(b->jmp.arg, R)) + return NOBID; + else { + assert(req(b->jmp.arg, r)); + return b->id; + } +} + +static void +lateblk(Fn *fn, uint bid) +{ + Blk *b; + Phi **pp; + Ins *i; + + b = fn->rpo[bid]; + for (pp=&b->phi; *(pp);) + if (schedlate(fn, (*pp)->to) == NOBID) { + (*pp)->narg = 0; /* mark unused */ + *pp = (*pp)->link; /* remove phi */ + } else + pp = &(*pp)->link; + + for (i=b->ins; i<&b->ins[b->nins]; i++) + if (pinned(i)) + schedlate(fn, i->to); +} + +static void +addgcmins(Fn *fn, Ins *vins, uint nins) +{ + Ins *i; + Tmp *t; + Blk *b; + + for (i=vins; i<&vins[nins]; i++) { + assert(rtype(i->to) == RTmp); + t = &fn->tmp[i->to.val]; + b = fn->rpo[t->gcmbid]; + addins(&b->ins, &b->nins, i); + } +} + +/* move live instructions to the + * end of their target block; use- + * before-def errors are fixed by + * schedblk */ +static void +gcmmove(Fn *fn) +{ + Tmp *t; + Ins *vins, *i; + uint nins; + + nins = 0; + vins = 
vnew(nins, sizeof vins[0], PFn); + + for (t=fn->tmp; t<&fn->tmp[fn->ntmp]; t++) { + if (t->def == 0) + continue; + if (t->bid == t->gcmbid) + continue; + i = t->def; + if (pinned(i) && !canelim(i)) + continue; + assert(rtype(i->to) == RTmp); + assert(t == &fn->tmp[i->to.val]); + if (t->gcmbid != NOBID) + addins(&vins, &nins, i); + *i = (Ins){.op = Onop}; + } + addgcmins(fn, vins, nins); +} + +/* dfs ordering */ +static Ins * +schedins(Fn *fn, Blk *b, Ins *i, Ins **pvins, uint *pnins) +{ + Ins *i0, *i1; + Tmp *t; + uint n; + + igroup(b, i, &i0, &i1); + for (i=i0; iarg[n]) != RTmp) + continue; + t = &fn->tmp[i->arg[n].val]; + if (t->bid != b->id || !t->def) + continue; + schedins(fn, b, t->def, pvins, pnins); + } + for (i=i0; istart; b; b=b->link) { + nins = 0; + for (i=b->ins; i<&b->ins[b->nins];) + i = schedins(fn, b, i, &vins, &nins); + idup(b, vins, nins); + } + vfree(vins); +} + +static int +cheap(Ins *i) +{ + int x; + + if (KBASE(i->cls) != 0) + return 0; + switch (i->op) { + case Oneg: + case Oadd: + case Osub: + case Omul: + case Oand: + case Oor: + case Oxor: + case Osar: + case Oshr: + case Oshl: + return 1; + default: + return iscmp(i->op, &x, &x); + } +} + +static void +sinkref(Fn *fn, Blk *b, Ref *pr) +{ + Ins i; + Tmp *t; + Ref r; + + if (rtype(*pr) != RTmp) + return; + t = &fn->tmp[pr->val]; + if (!t->def + || t->bid == b->id + || pinned(t->def) + || !cheap(t->def)) + return; + + /* sink t->def to b */ + i = *t->def; + r = newtmp("snk", t->cls, fn); + t = 0; /* invalidated */ + *pr = r; + i.to = r; + fn->tmp[r.val].gcmbid = b->id; + emiti(i); + sinkref(fn, b, &i.arg[0]); + sinkref(fn, b, &i.arg[1]); +} + +/* redistribute trivial ops to point of + * use to reduce register pressure + * requires rpo, use; breaks use + */ +static void +sink(Fn *fn) +{ + Blk *b; + Ins *i; + + for (b=fn->start; b; b=b->link) { + for (i=b->ins; i<&b->ins[b->nins]; i++) + if (isload(i->op)) + sinkref(fn, b, &i->arg[0]); + else if (isstore(i->op)) + sinkref(fn, b, &i->arg[1]); 
+ sinkref(fn, b, &b->jmp.arg); + } + addgcmins(fn, curi, &insb[NIns] - curi); +} + +/* requires use dom + * maintains rpo pred dom + * breaks use + */ +void +gcm(Fn *fn) +{ + Tmp *t; + uint bid; + + filldepth(fn); + fillloop(fn); + + for (t=fn->tmp; t<&fn->tmp[fn->ntmp]; t++) { + t->visit = 0; + t->gcmbid = NOBID; + } + for (bid=0; bidnblk; bid++) + earlyblk(fn, bid); + for (bid=0; bidnblk; bid++) + lateblk(fn, bid); + + gcmmove(fn); + filluse(fn); + curi = &insb[NIns]; + sink(fn); + filluse(fn); + schedblk(fn); + + if (debug['G']) { + fprintf(stderr, "\n> After GCM:\n"); + printfn(fn, stderr); + } +} diff --git a/src/qbe/gvn.c b/src/qbe/gvn.c new file mode 100644 index 00000000..92ee5eb9 --- /dev/null +++ b/src/qbe/gvn.c @@ -0,0 +1,508 @@ +#include "all.h" + +Ref con01[2]; + +static inline uint +mix(uint x0, uint x1) +{ + return x0 + 17*x1; +} + +static inline uint +rhash(Ref r) +{ + return mix(r.type, r.val); +} + +static uint +ihash(Ins *i) +{ + uint h; + + h = mix(i->op, i->cls); + h = mix(h, rhash(i->arg[0])); + h = mix(h, rhash(i->arg[1])); + + return h; +} + +static int +ieq(Ins *ia, Ins *ib) +{ + if (ia->op == ib->op) + if (ia->cls == ib->cls) + if (req(ia->arg[0], ib->arg[0])) + if (req(ia->arg[1], ib->arg[1])) + return 1; + return 0; +} + +static Ins **gvntbl; +static uint gvntbln; + +static Ins * +gvndup(Ins *i, int insert) +{ + uint idx, n; + Ins *ii; + + idx = ihash(i) % gvntbln; + for (n=1;; n++) { + ii = gvntbl[idx]; + if (!ii) + break; + if (ieq(i, ii)) + return ii; + + idx++; + if (gvntbln <= idx) + idx = 0; + } + if (insert) + gvntbl[idx] = i; + return 0; +} + +static void +replaceuse(Fn *fn, Use *u, Ref r1, Ref r2) +{ + Blk *b; + Ins *i; + Phi *p; + Ref *pr; + Tmp *t2; + int n; + + t2 = 0; + if (rtype(r2) == RTmp) + t2 = &fn->tmp[r2.val]; + b = fn->rpo[u->bid]; + switch (u->type) { + case UPhi: + p = u->u.phi; + for (pr=p->arg; pr<&p->arg[p->narg]; pr++) + if (req(*pr, r1)) + *pr = r2; + if (t2) + adduse(t2, UPhi, b, p); + break; + case UIns: + i 
= u->u.ins; + for (n=0; n<2; n++) + if (req(i->arg[n], r1)) + i->arg[n] = r2; + if (t2) + adduse(t2, UIns, b, i); + break; + case UJmp: + if (req(b->jmp.arg, r1)) + b->jmp.arg = r2; + if (t2) + adduse(t2, UJmp, b); + break; + case UXXX: + die("unreachable"); + } +} + +static void +replaceuses(Fn *fn, Ref r1, Ref r2) +{ + Tmp *t1; + Use *u; + + assert(rtype(r1) == RTmp); + t1 = &fn->tmp[r1.val]; + for (u=t1->use; u<&t1->use[t1->nuse]; u++) + replaceuse(fn, u, r1, r2); + t1->nuse = 0; +} + +static void +dedupphi(Fn *fn, Blk *b) +{ + Phi *p, **pp; + Ref r; + + for (pp=&b->phi; (p=*pp);) { + r = phicopyref(fn, b, p); + if (!req(r, R)) { + replaceuses(fn, p->to, r); + p->to = R; + *pp = p->link; + } else + pp = &p->link; + } +} + +static int +rcmp(Ref a, Ref b) +{ + if (rtype(a) != rtype(b)) + return rtype(a) - rtype(b); + return a.val - b.val; +} + +static void +normins(Fn *fn, Ins *i) +{ + uint n; + int64_t v; + Ref r; + + /* truncate constant bits to + * 32 bits for s/w uses */ + for (n=0; n<2; n++) { + if (!KWIDE(argcls(i, n))) + if (isconbits(fn, i->arg[n], &v)) + if ((v & 0xffffffff) != v) + i->arg[n] = getcon(v & 0xffffffff, fn); + } + /* order arg[0] <= arg[1] for + * commutative ops, preferring + * RTmp in arg[0] */ + if (optab[i->op].commutes) + if (rcmp(i->arg[0], i->arg[1]) > 0) { + r = i->arg[1]; + i->arg[1] = i->arg[0]; + i->arg[0] = r; + } +} + +static int +negcon(int cls, Con *c) +{ + static Con z = {.type = CBits, .bits.i = 0}; + + return foldint(c, Osub, cls, &z, c); +} + +static void +assoccon(Fn *fn, Blk *b, Ins *i1) +{ + Tmp *t2; + Ins *i2; + int op, fail; + Con c, c1, c2; + + op = i1->op; + if (op == Osub) + op = Oadd; + + if (!optab[op].assoc + || KBASE(i1->cls) != 0 + || rtype(i1->arg[0]) != RTmp + || rtype(i1->arg[1]) != RCon) + return; + c1 = fn->con[i1->arg[1].val]; + + t2 = &fn->tmp[i1->arg[0].val]; + if (t2->def == 0) + return; + i2 = t2->def; + + if (op != (i2->op == Osub ? 
Oadd : i2->op) + || rtype(i2->arg[1]) != RCon) + return; + c2 = fn->con[i2->arg[1].val]; + + assert(KBASE(i2->cls) == 0); + assert(KWIDE(i2->cls) >= KWIDE(i1->cls)); + + if (i1->op == Osub && negcon(i1->cls, &c1)) + return; + if (i2->op == Osub && negcon(i2->cls, &c2)) + return; + if (foldint(&c, op, i1->cls, &c1, &c2)) + return; + + if (op == Oadd && c.type == CBits) + if ((i1->cls == Kl && c.bits.i < 0) + || (i1->cls == Kw && (int32_t)c.bits.i < 0)) { + fail = negcon(i1->cls, &c); + assert(fail == 0); + op = Osub; + } + + i1->op = op; + i1->arg[0] = i2->arg[0]; + i1->arg[1] = newcon(&c, fn); + adduse(&fn->tmp[i1->arg[0].val], UIns, b, i1); +} + +static void +killins(Fn *fn, Ins *i, Ref r) +{ + replaceuses(fn, i->to, r); + *i = (Ins){.op = Onop}; +} + +static void +dedupins(Fn *fn, Blk *b, Ins *i) +{ + Ref r; + Ins *i1; + + normins(fn, i); + if (i->op == Onop || pinned(i)) + return; + + /* when sel instructions are inserted + * before gvn, we may want to optimize + * them here */ + assert(i->op != Osel0); + assert(!req(i->to, R)); + assoccon(fn, b, i); + + r = copyref(fn, b, i); + if (!req(r, R)) { + killins(fn, i, r); + return; + } + r = foldref(fn, i); + if (!req(r, R)) { + killins(fn, i, r); + return; + } + i1 = gvndup(i, 1); + if (i1) { + killins(fn, i, i1->to); + return; + } +} + +int +cmpeqz(Fn *fn, Ref r, Ref *arg, int *cls, int *eqval) +{ + Ins *i; + + if (rtype(r) != RTmp) + return 0; + i = fn->tmp[r.val].def; + if (i) + if (optab[i->op].cmpeqwl) + if (req(i->arg[1], CON_Z)) { + *arg = i->arg[0]; + *cls = argcls(i, 0); + *eqval = optab[i->op].eqval; + return 1; + } + return 0; +} + +static int +branchdom(Fn *fn, Blk *bif, Blk *bbr1, Blk *bbr2, Blk *b) +{ + assert(bif->jmp.type == Jjnz); + + if (b != bif + && dom(bbr1, b) + && !reachesnotvia(fn, bbr2, b, bif)) + return 1; + + return 0; +} + +static int +domzero(Fn *fn, Blk *d, Blk *b, int *z) +{ + if (branchdom(fn, d, d->s1, d->s2, b)) { + *z = 0; + return 1; + } + if (branchdom(fn, d, d->s2, d->s1, b)) { 
+ *z = 1; + return 1; + } + return 0; +} + +/* infer 0/non-0 value from dominating jnz */ +int +zeroval(Fn *fn, Blk *b, Ref r, int cls, int *z) +{ + Blk *d; + Ref arg; + int cls1, eqval; + + for (d=b->idom; d; d=d->idom) { + if (d->jmp.type != Jjnz) + continue; + if (req(r, d->jmp.arg) + && cls == Kw + && domzero(fn, d, b, z)) { + return 1; + } + if (cmpeqz(fn, d->jmp.arg, &arg, &cls1, &eqval) + && req(r, arg) + && cls == cls1 + && domzero(fn, d, b, z)) { + *z ^= eqval; + return 1; + } + } + return 0; +} + +static int +usecls(Use *u, Ref r, int cls) +{ + int k; + + switch (u->type) { + case UIns: + k = Kx; /* widest use */ + if (req(u->u.ins->arg[0], r)) + k = argcls(u->u.ins, 0); + if (req(u->u.ins->arg[1], r)) + if (k == Kx || !KWIDE(k)) + k = argcls(u->u.ins, 1); + return k == Kx ? cls : k; + case UPhi: + if (req(u->u.phi->to, R)) + return cls; /* eliminated */ + return u->u.phi->cls; + case UJmp: + return Kw; + default: + break; + } + die("unreachable"); +} + +static void +propjnz0(Fn *fn, Blk *bif, Blk *s0, Blk *snon0, Ref r, int cls) +{ + Blk *b; + Tmp *t; + Use *u; + + if (s0->npred != 1 || rtype(r) != RTmp) + return; + t = &fn->tmp[r.val]; + for (u=t->use; u<&t->use[t->nuse]; u++) { + b = fn->rpo[u->bid]; + /* we may compare an l temp with a w + * comparison; so check that the use + * does not involve high bits */ + if (usecls(u, r, cls) == cls) + if (branchdom(fn, bif, s0, snon0, b)) + replaceuse(fn, u, r, CON_Z); + } +} + +static void +dedupjmp(Fn *fn, Blk *b) +{ + Blk **ps; + int64_t v; + Ref arg; + int cls, eqval, z; + + if (b->jmp.type != Jjnz) + return; + + /* propagate jmp arg as 0 through s2 */ + propjnz0(fn, b, b->s2, b->s1, b->jmp.arg, Kw); + /* propagate cmp eq/ne 0 def of jmp arg as 0 */ + if (cmpeqz(fn, b->jmp.arg, &arg, &cls, &eqval)) { + ps = (Blk*[]){b->s1, b->s2}; + propjnz0(fn, b, ps[eqval^1], ps[eqval], arg, cls); + } + + /* collapse trivial/constant jnz to jmp */ + v = 1; + z = 0; + if (b->s1 == b->s2 + || isconbits(fn, b->jmp.arg, &v) + 
|| zeroval(fn, b, b->jmp.arg, Kw, &z)) { + if (v == 0 || z) + b->s1 = b->s2; + /* we later move active ins out of dead blks */ + b->s2 = 0; + b->jmp.type = Jjmp; + b->jmp.arg = R; + } +} + +static void +rebuildcfg(Fn *fn) +{ + uint n, nblk; + Blk *b, *s, **rpo; + Ins *i; + + nblk = fn->nblk; + rpo = emalloc(nblk * sizeof rpo[0]); + memcpy(rpo, fn->rpo, nblk * sizeof rpo[0]); + + fillcfg(fn); + + /* move instructions that were in + * killed blocks and may be active + * in the computation in the start + * block */ + s = fn->start; + for (n=0; nid != -1u) + continue; + /* blk unreachable after GVN */ + assert(b != s); + for (i=b->ins; i<&b->ins[b->nins]; i++) + if (!optab[i->op].pinned) + if (gvndup(i, 0) == i) + addins(&s->ins, &s->nins, i); + } + free(rpo); +} + +/* requires rpo pred ssa use + * recreates rpo preds + * breaks pred use dom ssa (GCM fixes ssa) + */ +void +gvn(Fn *fn) +{ + Blk *b; + Phi *p; + Ins *i; + uint n, nins; + + con01[0] = getcon(0, fn); + con01[1] = getcon(1, fn); + + /* copy.c uses the visit bit */ + for (b=fn->start; b; b=b->link) + for (p=b->phi; p; p=p->link) + p->visit = 0; + + fillloop(fn); + narrowpars(fn); + filluse(fn); + ssacheck(fn); + + nins = 0; + for (b=fn->start; b; b=b->link) { + b->visit = 0; + nins += b->nins; + } + + gvntbln = nins + nins/2; + gvntbl = emalloc(gvntbln * sizeof gvntbl[0]); + for (n=0; nnblk; n++) { + b = fn->rpo[n]; + dedupphi(fn, b); + for (i=b->ins; i<&b->ins[b->nins]; i++) + dedupins(fn, b, i); + dedupjmp(fn, b); + } + rebuildcfg(fn); + free(gvntbl); + gvntbl = 0; + + if (debug['G']) { + fprintf(stderr, "\n> After GVN:\n"); + printfn(fn, stderr); + } +} diff --git a/src/qbe/ifopt.c b/src/qbe/ifopt.c new file mode 100644 index 00000000..3e45f52c --- /dev/null +++ b/src/qbe/ifopt.c @@ -0,0 +1,121 @@ +#include "all.h" + +enum { + MaxIns = 2, + MaxPhis = 2, +}; + +static int +okbranch(Blk *b) +{ + Ins *i; + int n; + + n = 0; + for (i=b->ins; i<&b->ins[b->nins]; i++) + if (i->op != Odbgloc) { + if (pinned(i)) + 
return 0; + if (i->op != Onop) + n++; + } + return n <= MaxIns; +} + +static int +okjoin(Blk *b) +{ + Phi *p; + int n; + + n = 0; + for (p=b->phi; p; p=p->link) { + if (KBASE(p->cls) != 0) + return 0; + n++; + } + return n <= MaxPhis; +} + +static int +okgraph(Blk *ifb, Blk *thenb, Blk *elseb, Blk *joinb) +{ + if (joinb->npred != 2 || !okjoin(joinb)) + return 0; + assert(thenb != elseb); + if (thenb != ifb && !okbranch(thenb)) + return 0; + if (elseb != ifb && !okbranch(elseb)) + return 0; + return 1; +} + +static void +convert(Blk *ifb, Blk *thenb, Blk *elseb, Blk *joinb) +{ + Ins *ins, sel; + Phi *p; + uint nins; + + ins = vnew(0, sizeof ins[0], PHeap); + nins = 0; + addbins(&ins, &nins, ifb); + if (thenb != ifb) + addbins(&ins, &nins, thenb); + if (elseb != ifb) + addbins(&ins, &nins, elseb); + assert(joinb->npred == 2); + if (joinb->phi) { + sel = (Ins){ + .op = Osel0, .cls = Kw, + .arg = {ifb->jmp.arg}, + }; + addins(&ins, &nins, &sel); + } + sel = (Ins){.op = Osel1}; + for (p=joinb->phi; p; p=p->link) { + sel.to = p->to; + sel.cls = p->cls; + sel.arg[0] = phiarg(p, thenb); + sel.arg[1] = phiarg(p, elseb); + addins(&ins, &nins, &sel); + } + idup(ifb, ins, nins); + ifb->jmp.type = Jjmp; + ifb->jmp.arg = R; + ifb->s1 = joinb; + ifb->s2 = 0; + joinb->npred = 1; + joinb->pred[0] = ifb; + joinb->phi = 0; + vfree(ins); +} + +/* eliminate if-then[-else] graphlets + * using sel instructions + * needs rpo pred use; breaks cfg use + */ +void +ifconvert(Fn *fn) +{ + Blk *ifb, *thenb, *elseb, *joinb; + + if (debug['K']) + fputs("\n> If-conversion:\n", stderr); + + for (ifb=fn->start; ifb; ifb=ifb->link) + if (ifgraph(ifb, &thenb, &elseb, &joinb)) + if (okgraph(ifb, thenb, elseb, joinb)) { + if (debug['K']) + fprintf(stderr, + " @%s -> @%s, @%s -> @%s\n", + ifb->name, thenb->name, elseb->name, + joinb->name); + convert(ifb, thenb, elseb, joinb); + } + + if (debug['K']) { + fprintf(stderr, "\n> After if-conversion:\n"); + printfn(fn, stderr); + } +} diff --git 
a/src/qbe/live.c b/src/qbe/live.c new file mode 100644 index 00000000..68f48b0e --- /dev/null +++ b/src/qbe/live.c @@ -0,0 +1,144 @@ +#include "all.h" + +void +liveon(BSet *v, Blk *b, Blk *s) +{ + Phi *p; + uint a; + + bscopy(v, s->in); + for (p=s->phi; p; p=p->link) + if (rtype(p->to) == RTmp) + bsclr(v, p->to.val); + for (p=s->phi; p; p=p->link) + for (a=0; anarg; a++) + if (p->blk[a] == b) + if (rtype(p->arg[a]) == RTmp) { + bsset(v, p->arg[a].val); + bsset(b->gen, p->arg[a].val); + } +} + +static void +bset(Ref r, Blk *b, int *nlv, Tmp *tmp) +{ + + if (rtype(r) != RTmp) + return; + bsset(b->gen, r.val); + if (!bshas(b->in, r.val)) { + nlv[KBASE(tmp[r.val].cls)]++; + bsset(b->in, r.val); + } +} + +/* liveness analysis + * requires rpo computation + */ +void +filllive(Fn *f) +{ + Blk *b; + Ins *i; + int k, t, m[2], n, chg, nlv[2]; + BSet u[1], v[1]; + Mem *ma; + + bsinit(u, f->ntmp); + bsinit(v, f->ntmp); + for (b=f->start; b; b=b->link) { + bsinit(b->in, f->ntmp); + bsinit(b->out, f->ntmp); + bsinit(b->gen, f->ntmp); + } + chg = 1; +Again: + for (n=f->nblk-1; n>=0; n--) { + b = f->rpo[n]; + + bscopy(u, b->out); + if (b->s1) { + liveon(v, b, b->s1); + bsunion(b->out, v); + } + if (b->s2) { + liveon(v, b, b->s2); + bsunion(b->out, v); + } + chg |= !bsequal(b->out, u); + + memset(nlv, 0, sizeof nlv); + b->out->t[0] |= T.rglob; + bscopy(b->in, b->out); + for (t=0; bsiter(b->in, &t); t++) + nlv[KBASE(f->tmp[t].cls)]++; + if (rtype(b->jmp.arg) == RCall) { + assert((int)bscount(b->in) == T.nrglob && + b->in->t[0] == T.rglob); + b->in->t[0] |= T.retregs(b->jmp.arg, nlv); + } else + bset(b->jmp.arg, b, nlv, f->tmp); + for (k=0; k<2; k++) + b->nlive[k] = nlv[k]; + for (i=&b->ins[b->nins]; i!=b->ins;) { + if ((--i)->op == Ocall && rtype(i->arg[1]) == RCall) { + b->in->t[0] &= ~T.retregs(i->arg[1], m); + for (k=0; k<2; k++) { + nlv[k] -= m[k]; + /* caller-save registers are used + * by the callee, in that sense, + * right in the middle of the call, + * they are live: */ + 
nlv[k] += T.nrsave[k]; + if (nlv[k] > b->nlive[k]) + b->nlive[k] = nlv[k]; + } + b->in->t[0] |= T.argregs(i->arg[1], m); + for (k=0; k<2; k++) { + nlv[k] -= T.nrsave[k]; + nlv[k] += m[k]; + } + } + if (!req(i->to, R)) { + assert(rtype(i->to) == RTmp); + t = i->to.val; + if (bshas(b->in, t)) + nlv[KBASE(f->tmp[t].cls)]--; + bsset(b->gen, t); + bsclr(b->in, t); + } + for (k=0; k<2; k++) + switch (rtype(i->arg[k])) { + case RMem: + ma = &f->mem[i->arg[k].val]; + bset(ma->base, b, nlv, f->tmp); + bset(ma->index, b, nlv, f->tmp); + break; + default: + bset(i->arg[k], b, nlv, f->tmp); + break; + } + for (k=0; k<2; k++) + if (nlv[k] > b->nlive[k]) + b->nlive[k] = nlv[k]; + } + } + if (chg) { + chg = 0; + goto Again; + } + + if (debug['L']) { + fprintf(stderr, "\n> Liveness analysis:\n"); + for (b=f->start; b; b=b->link) { + fprintf(stderr, "\t%-10sin: ", b->name); + dumpts(b->in, f->tmp, stderr); + fprintf(stderr, "\t out: "); + dumpts(b->out, f->tmp, stderr); + fprintf(stderr, "\t gen: "); + dumpts(b->gen, f->tmp, stderr); + fprintf(stderr, "\t live: "); + fprintf(stderr, "%d %d\n", b->nlive[0], b->nlive[1]); + } + } +} diff --git a/src/qbe/load.c b/src/qbe/load.c new file mode 100644 index 00000000..bac382c0 --- /dev/null +++ b/src/qbe/load.c @@ -0,0 +1,493 @@ +#include "all.h" + +#define MASK(w) (BIT(8*(w)-1)*2-1) /* must work when w==8 */ + +typedef struct Loc Loc; +typedef struct Slice Slice; +typedef struct Insert Insert; + +struct Loc { + enum { + LRoot, /* right above the original load */ + LLoad, /* inserting a load is allowed */ + LNoLoad, /* only scalar operations allowed */ + } type; + uint off; + Blk *blk; +}; + +struct Slice { + Ref ref; + int off; + short sz; + short cls; /* load class */ +}; + +struct Insert { + uint isphi:1; + uint num:31; + uint bid; + uint off; + union { + Ins ins; + struct { + Slice m; + Phi *p; + } phi; + } new; +}; + +static Fn *curf; +static uint inum; /* current insertion number */ +static Insert *ilog; /* global insertion log */ 
+static uint nlog; /* number of entries in the log */ + +int +loadsz(Ins *l) +{ + switch (l->op) { + case Oloadsb: case Oloadub: return 1; + case Oloadsh: case Oloaduh: return 2; + case Oloadsw: case Oloaduw: return 4; + case Oload: return KWIDE(l->cls) ? 8 : 4; + } + die("unreachable"); +} + +int +storesz(Ins *s) +{ + switch (s->op) { + case Ostoreb: return 1; + case Ostoreh: return 2; + case Ostorew: case Ostores: return 4; + case Ostorel: case Ostored: return 8; + } + die("unreachable"); +} + +static Ref +iins(int cls, int op, Ref a0, Ref a1, Loc *l) +{ + Insert *ist; + + vgrow(&ilog, ++nlog); + ist = &ilog[nlog-1]; + ist->isphi = 0; + ist->num = inum++; + ist->bid = l->blk->id; + ist->off = l->off; + ist->new.ins = (Ins){op, cls, R, {a0, a1}}; + return ist->new.ins.to = newtmp("ld", cls, curf); +} + +static void +cast(Ref *r, int cls, Loc *l) +{ + int cls0; + + if (rtype(*r) == RCon) + return; + assert(rtype(*r) == RTmp); + cls0 = curf->tmp[r->val].cls; + if (cls0 == cls || (cls == Kw && cls0 == Kl)) + return; + if (KWIDE(cls0) < KWIDE(cls)) { + if (cls0 == Ks) + *r = iins(Kw, Ocast, *r, R, l); + *r = iins(Kl, Oextuw, *r, R, l); + if (cls == Kd) + *r = iins(Kd, Ocast, *r, R, l); + } else { + if (cls0 == Kd && cls != Kl) + *r = iins(Kl, Ocast, *r, R, l); + if (cls0 != Kd || cls != Kw) + *r = iins(cls, Ocast, *r, R, l); + } +} + +static inline void +mask(int cls, Ref *r, bits msk, Loc *l) +{ + cast(r, cls, l); + *r = iins(cls, Oand, *r, getcon(msk, curf), l); +} + +static Ref +load(Slice sl, bits msk, Loc *l) +{ + Alias *a; + Ref r, r1; + int ld, cls, all; + Con c; + + ld = (int[]){ + [1] = Oloadub, + [2] = Oloaduh, + [4] = Oloaduw, + [8] = Oload + }[sl.sz]; + all = msk == MASK(sl.sz); + if (all) + cls = sl.cls; + else + cls = sl.sz > 4 ? 
Kl : Kw; + r = sl.ref; + /* sl.ref might not be live here, + * but its alias base ref will be + * (see killsl() below) */ + if (rtype(r) == RTmp) { + a = &curf->tmp[r.val].alias; + switch (a->type) { + default: + die("unreachable"); + case ALoc: + case AEsc: + case AUnk: + r = TMP(a->base); + if (!a->offset) + break; + r1 = getcon(a->offset, curf); + r = iins(Kl, Oadd, r, r1, l); + break; + case ACon: + case ASym: + memset(&c, 0, sizeof c); + c.type = CAddr; + c.sym = a->u.sym; + c.bits.i = a->offset; + r = newcon(&c, curf); + break; + } + } + r = iins(cls, ld, r, R, l); + if (!all) + mask(cls, &r, msk, l); + return r; +} + +static int +killsl(Ref r, Slice sl) +{ + Alias *a; + + if (rtype(sl.ref) != RTmp) + return 0; + a = &curf->tmp[sl.ref.val].alias; + switch (a->type) { + default: die("unreachable"); + case ALoc: + case AEsc: + case AUnk: return req(TMP(a->base), r); + case ACon: + case ASym: return 0; + } +} + +/* returns a ref containing the contents of the slice + * passed as argument, all the bits set to 0 in the + * mask argument are zeroed in the result; + * the returned ref has an integer class when the + * mask does not cover all the bits of the slice, + * otherwise, it has class sl.cls + * the procedure returns R when it fails */ +static Ref +def(Slice sl, bits msk, Blk *b, Ins *i, Loc *il) +{ + Slice sl1; + Blk *bp; + bits msk1, msks; + int off, cls, cls1, op, sz, ld; + uint np, oldl, oldt; + Ref r, r1; + Phi *p; + Insert *ist; + Loc l; + + /* invariants: + * -1- b dominates il->blk; so we can use + * temporaries of b in il->blk + * -2- if il->type != LNoLoad, then il->blk + * postdominates the original load; so it + * is safe to load in il->blk + * -3- if il->type != LNoLoad, then b + * postdominates il->blk (and by 2, the + * original load) + */ + assert(dom(b, il->blk)); + oldl = nlog; + oldt = curf->ntmp; + if (0) { + Load: + curf->ntmp = oldt; + nlog = oldl; + if (il->type != LLoad) + return R; + return load(sl, msk, il); + } + + if (!i) + i = 
&b->ins[b->nins]; + cls = sl.sz > 4 ? Kl : Kw; + msks = MASK(sl.sz); + + while (i > b->ins) { + --i; + if (killsl(i->to, sl) + || (i->op == Ocall && escapes(sl.ref, curf))) + goto Load; + ld = isload(i->op); + if (ld) { + sz = loadsz(i); + r1 = i->arg[0]; + r = i->to; + } else if (isstore(i->op)) { + sz = storesz(i); + r1 = i->arg[1]; + r = i->arg[0]; + } else if (i->op == Oblit1) { + assert(rtype(i->arg[0]) == RInt); + sz = abs(rsval(i->arg[0])); + assert(i > b->ins); + --i; + assert(i->op == Oblit0); + r1 = i->arg[1]; + } else + continue; + switch (alias(sl.ref, sl.off, sl.sz, r1, sz, &off, curf)) { + case MustAlias: + if (i->op == Oblit0) { + sl1 = sl; + sl1.ref = i->arg[0]; + if (off >= 0) { + assert(off < sz); + sl1.off = off; + sz -= off; + off = 0; + } else { + sl1.off = 0; + sl1.sz += off; + } + if (sz > sl1.sz) + sz = sl1.sz; + assert(sz <= 8); + sl1.sz = sz; + } + if (off < 0) { + off = -off; + msk1 = (MASK(sz) << 8*off) & msks; + op = Oshl; + } else { + msk1 = (MASK(sz) >> 8*off) & msks; + op = Oshr; + } + if ((msk1 & msk) == 0) + continue; + if (i->op == Oblit0) { + r = def(sl1, MASK(sz), b, i, il); + if (req(r, R)) + goto Load; + } + if (off) { + cls1 = cls; + if (op == Oshr && off + sl.sz > 4) + cls1 = Kl; + cast(&r, cls1, il); + r1 = getcon(8*off, curf); + r = iins(cls1, op, r, r1, il); + } + if ((msk1 & msk) != msk1 || off + sz < sl.sz) + mask(cls, &r, msk1 & msk, il); + if ((msk & ~msk1) != 0) { + r1 = def(sl, msk & ~msk1, b, i, il); + if (req(r1, R)) + goto Load; + r = iins(cls, Oor, r, r1, il); + } + if (msk == msks) + cast(&r, sl.cls, il); + return r; + case MayAlias: + if (ld) + continue; + else + goto Load; + case NoAlias: + continue; + default: + die("unreachable"); + } + } + + for (ist=ilog; ist<&ilog[nlog]; ++ist) + if (ist->isphi && ist->bid == b->id) + if (req(ist->new.phi.m.ref, sl.ref)) + if (ist->new.phi.m.off == sl.off) + if (ist->new.phi.m.sz == sl.sz) { + r = ist->new.phi.p->to; + if (msk != msks) + mask(cls, &r, msk, il); + else + 
cast(&r, sl.cls, il); + return r; + } + + for (p=b->phi; p; p=p->link) + if (killsl(p->to, sl)) + /* scanning predecessors in that + * case would be unsafe */ + goto Load; + + if (b->npred == 0) + goto Load; + if (b->npred == 1) { + bp = b->pred[0]; + assert(bp->loop >= il->blk->loop); + l = *il; + if (bp->s2) + l.type = LNoLoad; + r1 = def(sl, msk, bp, 0, &l); + if (req(r1, R)) + goto Load; + return r1; + } + + r = newtmp("ld", sl.cls, curf); + p = alloc(sizeof *p); + vgrow(&ilog, ++nlog); + ist = &ilog[nlog-1]; + ist->isphi = 1; + ist->bid = b->id; + ist->new.phi.m = sl; + ist->new.phi.p = p; + p->to = r; + p->cls = sl.cls; + p->narg = b->npred; + p->arg = vnew(p->narg, sizeof p->arg[0], PFn); + p->blk = vnew(p->narg, sizeof p->blk[0], PFn); + for (np=0; npnpred; ++np) { + bp = b->pred[np]; + if (!bp->s2 + && il->type != LNoLoad + && bp->loop < il->blk->loop) + l.type = LLoad; + else + l.type = LNoLoad; + l.blk = bp; + l.off = bp->nins; + r1 = def(sl, msks, bp, 0, &l); + if (req(r1, R)) + goto Load; + p->arg[np] = r1; + p->blk[np] = bp; + /* XXX - multiplicity in predecessors!!! 
*/ + } + if (msk != msks) + mask(cls, &r, msk, il); + return r; +} + +static int +icmp(const void *pa, const void *pb) +{ + Insert *a, *b; + int c; + + a = (Insert *)pa; + b = (Insert *)pb; + if ((c = a->bid - b->bid)) + return c; + if (a->isphi && b->isphi) + return 0; + if (a->isphi) + return -1; + if (b->isphi) + return +1; + if ((c = a->off - b->off)) + return c; + return a->num - b->num; +} + +/* require rpo ssa alias */ +void +loadopt(Fn *fn) +{ + Ins *i, *ib; + Blk *b; + int sz; + uint n, ni, ext, nt; + Insert *ist; + Slice sl; + Loc l; + + curf = fn; + ilog = vnew(0, sizeof ilog[0], PHeap); + nlog = 0; + inum = 0; + for (b=fn->start; b; b=b->link) + for (i=b->ins; i<&b->ins[b->nins]; ++i) { + if (!isload(i->op)) + continue; + sz = loadsz(i); + sl = (Slice){i->arg[0], 0, sz, i->cls}; + l = (Loc){LRoot, i-b->ins, b}; + i->arg[1] = def(sl, MASK(sz), b, i, &l); + } + qsort(ilog, nlog, sizeof ilog[0], icmp); + vgrow(&ilog, nlog+1); + ilog[nlog].bid = fn->nblk; /* add a sentinel */ + ib = vnew(0, sizeof(Ins), PHeap); + for (ist=ilog, n=0; nnblk; ++n) { + b = fn->rpo[n]; + for (; ist->bid == n && ist->isphi; ++ist) { + ist->new.phi.p->link = b->phi; + b->phi = ist->new.phi.p; + } + ni = 0; + nt = 0; + for (;;) { + if (ist->bid == n && ist->off == ni) + i = &ist++->new.ins; + else { + if (ni == b->nins) + break; + i = &b->ins[ni++]; + if (isload(i->op) + && !req(i->arg[1], R)) { + ext = Oextsb + i->op - Oloadsb; + switch (i->op) { + default: + die("unreachable"); + case Oloadsb: + case Oloadub: + case Oloadsh: + case Oloaduh: + i->op = ext; + break; + case Oloadsw: + case Oloaduw: + if (i->cls == Kl) { + i->op = ext; + break; + } + /* fall through */ + case Oload: + i->op = Ocopy; + break; + } + i->arg[0] = i->arg[1]; + i->arg[1] = R; + } + } + vgrow(&ib, ++nt); + ib[nt-1] = *i; + } + idup(b, ib, nt); + } + vfree(ib); + vfree(ilog); + if (debug['M']) { + fprintf(stderr, "\n> After load elimination:\n"); + printfn(fn, stderr); + } +} diff --git a/src/qbe/main.c 
b/src/qbe/main.c new file mode 100644 index 00000000..ed1ac94d --- /dev/null +++ b/src/qbe/main.c @@ -0,0 +1,212 @@ +#include "all.h" +#include "config.h" +#include +#include + +Target T; + +char debug['Z'+1] = { + ['P'] = 0, /* parsing */ + ['M'] = 0, /* memory optimization */ + ['N'] = 0, /* ssa construction */ + ['C'] = 0, /* copy elimination */ + ['F'] = 0, /* constant folding */ + ['K'] = 0, /* if-conversion */ + ['A'] = 0, /* abi lowering */ + ['I'] = 0, /* instruction selection */ + ['L'] = 0, /* liveness */ + ['S'] = 0, /* spilling */ + ['R'] = 0, /* reg. allocation */ +}; + +extern Target T_amd64_sysv; +extern Target T_amd64_apple; +extern Target T_amd64_win; +extern Target T_arm64; +extern Target T_arm64_apple; +extern Target T_rv64; + +static Target *tlist[] = { + &T_amd64_sysv, + &T_amd64_apple, + &T_amd64_win, + &T_arm64, + &T_arm64_apple, + &T_rv64, + 0 +}; +static FILE *outf; +static int dbg; + +static void +data(Dat *d) +{ + if (dbg) + return; + emitdat(d, outf); + if (d->type == DEnd) { + fputs("/* end data */\n\n", outf); + freeall(); + } +} + +static void +func(Fn *fn) +{ + uint n; + + if (dbg) + fprintf(stderr, "**** Function %s ****", fn->name); + if (debug['P']) { + fprintf(stderr, "\n> After parsing:\n"); + printfn(fn, stderr); + } + T.abi0(fn); + fillcfg(fn); + filluse(fn); + promote(fn); + filluse(fn); + ssa(fn); + filluse(fn); + ssacheck(fn); + fillalias(fn); + loadopt(fn); + filluse(fn); + fillalias(fn); + coalesce(fn); + filluse(fn); + filldom(fn); + ssacheck(fn); + gvn(fn); + fillcfg(fn); + simplcfg(fn); + filluse(fn); + filldom(fn); + gcm(fn); + filluse(fn); + ssacheck(fn); + if (T.cansel) { + ifconvert(fn); + fillcfg(fn); + filluse(fn); + filldom(fn); + ssacheck(fn); + } + T.abi1(fn); + simpl(fn); + fillcfg(fn); + filluse(fn); + T.isel(fn); + fillcfg(fn); + filllive(fn); + fillloop(fn); + fillcost(fn); + spill(fn); + rega(fn); + fillcfg(fn); + simpljmp(fn); + fillcfg(fn); + assert(fn->rpo[0] == fn->start); + for (n=0;; n++) + if (n == 
fn->nblk-1) { + fn->rpo[n]->link = 0; + break; + } else + fn->rpo[n]->link = fn->rpo[n+1]; + if (!dbg) { + T.emitfn(fn, outf); + fprintf(outf, "/* end function %s */\n\n", fn->name); + } else + fprintf(stderr, "\n"); + freeall(); +} + +static void +dbgfile(char *fn) +{ + emitdbgfile(fn, outf); +} + +int +main(int ac, char *av[]) +{ + Target **t; + FILE *inf, *hf; + char *f, *sep; + int c; + + T = Deftgt; + outf = stdout; + while ((c = getopt(ac, av, "hd:o:t:")) != -1) + switch (c) { + case 'd': + for (; *optarg; optarg++) + if (isalpha(*optarg)) { + debug[toupper(*optarg)] = 1; + dbg = 1; + } + break; + case 'o': + if (strcmp(optarg, "-") != 0) { + outf = fopen(optarg, "w"); + if (!outf) { + fprintf(stderr, "cannot open '%s'\n", optarg); + exit(1); + } + } + break; + case 't': + if (strcmp(optarg, "?") == 0) { + puts(T.name); + exit(0); + } + for (t=tlist;; t++) { + if (!*t) { + fprintf(stderr, "unknown target '%s'\n", optarg); + exit(1); + } + if (strcmp(optarg, (*t)->name) == 0) { + T = **t; + break; + } + } + break; + case 'h': + default: + hf = c != 'h' ? 
stderr : stdout; + fprintf(hf, "%s [OPTIONS] {file.ssa, -}\n", av[0]); + fprintf(hf, "\t%-11s prints this help\n", "-h"); + fprintf(hf, "\t%-11s output to file\n", "-o file"); + fprintf(hf, "\t%-11s generate for a target among:\n", "-t "); + fprintf(hf, "\t%-11s ", ""); + for (t=tlist, sep=""; *t; t++, sep=", ") { + fprintf(hf, "%s%s", sep, (*t)->name); + if (*t == &Deftgt) + fputs(" (default)", hf); + } + fprintf(hf, "\n"); + fprintf(hf, "\t%-11s dump debug information\n", "-d "); + exit(c != 'h'); + } + + do { + f = av[optind]; + if (!f || strcmp(f, "-") == 0) { + inf = stdin; + f = "-"; + } else { + inf = fopen(f, "r"); + if (!inf) { + fprintf(stderr, "cannot open '%s'\n", f); + exit(1); + } + } + parse(inf, f, dbgfile, data, func); + fclose(inf); + } while (++optind < ac); + + if (!dbg) + T.emitfin(outf); + + exit(0); +} diff --git a/src/qbe/mem.c b/src/qbe/mem.c new file mode 100644 index 00000000..3265b394 --- /dev/null +++ b/src/qbe/mem.c @@ -0,0 +1,488 @@ +#include "all.h" + +typedef struct Range Range; +typedef struct Store Store; +typedef struct Slot Slot; + +/* require use, maintains use counts */ +void +promote(Fn *fn) +{ + Blk *b; + Ins *i, *l; + Tmp *t; + Use *u, *ue; + int s, k; + + /* promote uniform stack slots to temporaries */ + b = fn->start; + for (i=b->ins; i<&b->ins[b->nins]; i++) { + if (Oalloc > i->op || i->op > Oalloc1) + continue; + /* specific to NAlign == 3 */ + assert(rtype(i->to) == RTmp); + t = &fn->tmp[i->to.val]; + if (t->ndef != 1) + goto Skip; + k = -1; + s = -1; + for (u=t->use; u<&t->use[t->nuse]; u++) { + if (u->type != UIns) + goto Skip; + l = u->u.ins; + if (isload(l->op)) + if (s == -1 || s == loadsz(l)) { + s = loadsz(l); + continue; + } + if (isstore(l->op)) + if (req(i->to, l->arg[1]) && !req(i->to, l->arg[0])) + if (s == -1 || s == storesz(l)) + if (k == -1 || k == optab[l->op].argcls[0][0]) { + s = storesz(l); + k = optab[l->op].argcls[0][0]; + continue; + } + goto Skip; + } + /* get rid of the alloc and replace uses 
*/ + *i = (Ins){.op = Onop}; + t->ndef--; + ue = &t->use[t->nuse]; + for (u=t->use; u!=ue; u++) { + l = u->u.ins; + if (isstore(l->op)) { + l->cls = k; + l->op = Ocopy; + l->to = l->arg[1]; + l->arg[1] = R; + t->nuse--; + t->ndef++; + } else { + if (k == -1) + err("slot %%%s is read but never stored to", + fn->tmp[l->arg[0].val].name); + /* try to turn loads into copies so we + * can eliminate them later */ + switch(l->op) { + case Oloadsw: + case Oloaduw: + if (k == Kl) + goto Extend; + /* fall through */ + case Oload: + if (KBASE(k) != KBASE(l->cls)) + l->op = Ocast; + else + l->op = Ocopy; + break; + default: + Extend: + l->op = Oextsb + (l->op - Oloadsb); + break; + } + } + } + Skip:; + } + if (debug['M']) { + fprintf(stderr, "\n> After slot promotion:\n"); + printfn(fn, stderr); + } +} + +/* [a, b) with 0 <= a */ +struct Range { + int a, b; +}; + +struct Store { + int ip; + Ins *i; +}; + +struct Slot { + int t; + int sz; + bits m; + bits l; + Range r; + Slot *s; + Store *st; + int nst; +}; + +static inline int +rin(Range r, int n) +{ + return r.a <= n && n < r.b; +} + +static inline int +rovlap(Range r0, Range r1) +{ + return r0.b && r1.b && r0.a < r1.b && r1.a < r0.b; +} + +static void +radd(Range *r, int n) +{ + if (!r->b) + *r = (Range){n, n+1}; + else if (n < r->a) + r->a = n; + else if (n >= r->b) + r->b = n+1; +} + +static int +slot(Slot **ps, int64_t *off, Ref r, Fn *fn, Slot *sl) +{ + Alias a; + Tmp *t; + + getalias(&a, r, fn); + if (a.type != ALoc) + return 0; + t = &fn->tmp[a.base]; + if (t->visit < 0) + return 0; + *off = a.offset; + *ps = &sl[t->visit]; + return 1; +} + +static void +load(Ref r, bits x, int ip, Fn *fn, Slot *sl) +{ + int64_t off; + Slot *s; + + if (slot(&s, &off, r, fn, sl)) { + s->l |= x << off; + s->l &= s->m; + if (s->l) + radd(&s->r, ip); + } +} + +static void +store(Ref r, bits x, int ip, Ins *i, Fn *fn, Slot *sl) +{ + int64_t off; + Slot *s; + + if (slot(&s, &off, r, fn, sl)) { + if (s->l) { + radd(&s->r, ip); + s->l &= ~(x 
<< off); + } else { + vgrow(&s->st, ++s->nst); + s->st[s->nst-1].ip = ip; + s->st[s->nst-1].i = i; + } + } +} + +static int +scmp(const void *pa, const void *pb) +{ + Slot *a, *b; + + a = (Slot *)pa, b = (Slot *)pb; + if (a->sz != b->sz) + return b->sz - a->sz; + return a->r.a - b->r.a; +} + +static void +maxrpo(Blk *hd, Blk *b) +{ + if (hd->loop < (int)b->id) + hd->loop = b->id; +} + +void +coalesce(Fn *fn) +{ + Range r, *br; + Slot *s, *s0, *sl; + Blk *b, **ps, *succ[3]; + Ins *i, **bl; + Use *u; + Tmp *t, *ts; + Ref *arg; + bits x; + int64_t off0, off1; + int n, m, ip, sz, nsl, nbl, *stk; + uint total, freed, fused; + + /* minimize the stack usage + * by coalescing slots + */ + nsl = 0; + sl = vnew(0, sizeof sl[0], PHeap); + for (n=Tmp0; nntmp; n++) { + t = &fn->tmp[n]; + t->visit = -1; + if (t->alias.type == ALoc) + if (t->alias.slot == &t->alias) + if (t->bid == fn->start->id) + if (t->alias.u.loc.sz != -1) { + t->visit = nsl; + vgrow(&sl, ++nsl); + s = &sl[nsl-1]; + s->t = n; + s->sz = t->alias.u.loc.sz; + s->m = t->alias.u.loc.m; + s->s = 0; + s->st = vnew(0, sizeof s->st[0], PHeap); + s->nst = 0; + } + } + + /* one-pass liveness analysis */ + for (b=fn->start; b; b=b->link) + b->loop = -1; + loopiter(fn, maxrpo); + nbl = 0; + bl = vnew(0, sizeof bl[0], PHeap); + br = emalloc(fn->nblk * sizeof br[0]); + ip = INT_MAX - 1; + for (n=fn->nblk-1; n>=0; n--) { + b = fn->rpo[n]; + succ[0] = b->s1; + succ[1] = b->s2; + succ[2] = 0; + br[n].b = ip--; + for (s=sl; s<&sl[nsl]; s++) { + s->l = 0; + for (ps=succ; *ps; ps++) { + m = (*ps)->id; + if (m > n && rin(s->r, br[m].a)) { + s->l = s->m; + radd(&s->r, ip); + } + } + } + if (b->jmp.type == Jretc) + load(b->jmp.arg, -1, --ip, fn, sl); + for (i=&b->ins[b->nins]; i!=b->ins;) { + --i; + arg = i->arg; + if (i->op == Oargc) { + load(arg[1], -1, --ip, fn, sl); + } + if (isload(i->op)) { + x = BIT(loadsz(i)) - 1; + load(arg[0], x, --ip, fn, sl); + } + if (isstore(i->op)) { + x = BIT(storesz(i)) - 1; + store(arg[1], x, ip--, 
i, fn, sl); + } + if (i->op == Oblit0) { + assert((i+1)->op == Oblit1); + assert(rtype((i+1)->arg[0]) == RInt); + sz = abs(rsval((i+1)->arg[0])); + x = sz >= NBit ? (bits)-1 : BIT(sz) - 1; + store(arg[1], x, ip--, i, fn, sl); + load(arg[0], x, ip, fn, sl); + vgrow(&bl, ++nbl); + bl[nbl-1] = i; + } + } + for (s=sl; s<&sl[nsl]; s++) + if (s->l) { + radd(&s->r, ip); + if (b->loop != -1) { + assert(b->loop >= n); + radd(&s->r, br[b->loop].b - 1); + } + } + br[n].a = ip; + } + free(br); + + /* kill dead stores */ + for (s=sl; s<&sl[nsl]; s++) + for (n=0; nnst; n++) + if (!rin(s->r, s->st[n].ip)) { + i = s->st[n].i; + if (i->op == Oblit0) + *(i+1) = (Ins){.op = Onop}; + *i = (Ins){.op = Onop}; + } + + /* kill slots with an empty live range */ + total = 0; + freed = 0; + stk = vnew(0, sizeof stk[0], PHeap); + n = 0; + for (s=s0=sl; s<&sl[nsl]; s++) { + total += s->sz; + if (!s->r.b) { + vfree(s->st); + vgrow(&stk, ++n); + stk[n-1] = s->t; + freed += s->sz; + } else + *s0++ = *s; + } + nsl = s0-sl; + if (debug['M']) { + fputs("\n> Slot coalescing:\n", stderr); + if (n) { + fputs("\tkill [", stderr); + for (m=0; mtmp[stk[m]].name); + fputs(" ]\n", stderr); + } + } + while (n--) { + t = &fn->tmp[stk[n]]; + assert(t->ndef == 1 && t->def); + i = t->def; + if (isload(i->op)) { + i->op = Ocopy; + i->arg[0] = UNDEF; + continue; + } + *i = (Ins){.op = Onop}; + for (u=t->use; u<&t->use[t->nuse]; u++) { + if (u->type == UJmp) { + b = fn->rpo[u->bid]; + assert(isret(b->jmp.type)); + b->jmp.type = Jret0; + b->jmp.arg = R; + continue; + } + assert(u->type == UIns); + i = u->u.ins; + if (!req(i->to, R)) { + assert(rtype(i->to) == RTmp); + vgrow(&stk, ++n); + stk[n-1] = i->to.val; + } else if (isarg(i->op)) { + assert(i->op == Oargc); + i->arg[1] = CON_Z; /* crash */ + } else { + if (i->op == Oblit0) + *(i+1) = (Ins){.op = Onop}; + *i = (Ins){.op = Onop}; + } + } + } + vfree(stk); + + /* fuse slots by decreasing size */ + qsort(sl, nsl, sizeof *sl, scmp); + fused = 0; + for (n=0; ns) + 
continue; + s0->s = s0; + r = s0->r; + for (s=s0+1; s<&sl[nsl]; s++) { + if (s->s || !s->r.b) + goto Skip; + if (rovlap(r, s->r)) + /* O(n); can be approximated + * by 'goto Skip;' if need be + */ + for (m=n; &sl[m]r)) + goto Skip; + radd(&r, s->r.a); + radd(&r, s->r.b - 1); + s->s = s0; + fused += s->sz; + Skip:; + } + } + + /* substitute fused slots */ + for (s=sl; s<&sl[nsl]; s++) { + t = &fn->tmp[s->t]; + /* the visit link is stale, + * reset it before the slot() + * calls below + */ + t->visit = s-sl; + assert(t->ndef == 1 && t->def); + if (s->s == s) + continue; + *t->def = (Ins){.op = Onop}; + ts = &fn->tmp[s->s->t]; + assert(t->bid == ts->bid); + if (t->def < ts->def) { + /* make sure the slot we + * selected has a def that + * dominates its new uses + */ + *t->def = *ts->def; + *ts->def = (Ins){.op = Onop}; + ts->def = t->def; + } + for (u=t->use; u<&t->use[t->nuse]; u++) { + if (u->type == UJmp) { + b = fn->rpo[u->bid]; + b->jmp.arg = TMP(s->s->t); + continue; + } + assert(u->type == UIns); + arg = u->u.ins->arg; + for (n=0; n<2; n++) + if (req(arg[n], TMP(s->t))) + arg[n] = TMP(s->s->t); + } + } + + /* fix newly overlapping blits */ + for (n=0; nop == Oblit0) + if (slot(&s, &off0, i->arg[0], fn, sl)) + if (slot(&s0, &off1, i->arg[1], fn, sl)) + if (s->s == s0->s) { + if (off0 < off1) { + sz = rsval((i+1)->arg[0]); + assert(sz >= 0); + (i+1)->arg[0] = INT(-sz); + } else if (off0 == off1) { + *i = (Ins){.op = Onop}; + *(i+1) = (Ins){.op = Onop}; + } + } + } + vfree(bl); + + if (debug['M']) { + for (s0=sl; s0<&sl[nsl]; s0++) { + if (s0->s != s0) + continue; + fprintf(stderr, "\tfuse (% 3db) [", s0->sz); + for (s=s0; s<&sl[nsl]; s++) { + if (s->s != s0) + continue; + fprintf(stderr, " %%%s", fn->tmp[s->t].name); + if (s->r.b) + fprintf(stderr, "[%d,%d)", + s->r.a-ip, s->r.b-ip); + else + fputs("{}", stderr); + } + fputs(" ]\n", stderr); + } + fprintf(stderr, "\tsums %u/%u/%u (killed/fused/total)\n\n", + freed, fused, total); + printfn(fn, stderr); + } + + 
for (s=sl; s<&sl[nsl]; s++) + vfree(s->st); + vfree(sl); +} diff --git a/src/qbe/minic/.gitignore b/src/qbe/minic/.gitignore new file mode 100644 index 00000000..679c4114 --- /dev/null +++ b/src/qbe/minic/.gitignore @@ -0,0 +1,4 @@ +minic +yacc +y.* +*.out diff --git a/src/qbe/minic/Makefile b/src/qbe/minic/Makefile new file mode 100644 index 00000000..856f2d32 --- /dev/null +++ b/src/qbe/minic/Makefile @@ -0,0 +1,12 @@ +BIN = minic + +CFLAGS += -g -Wall + +$(BIN): yacc minic.y + ./yacc minic.y + $(CC) $(CFLAGS) -o $@ y.tab.c + +clean: + rm -f yacc minic y.* + +.PHONY: clean diff --git a/src/qbe/minic/mcc b/src/qbe/minic/mcc new file mode 100755 index 00000000..492947e8 --- /dev/null +++ b/src/qbe/minic/mcc @@ -0,0 +1,44 @@ +#!/bin/sh + +DIR=`cd $(dirname $0); pwd` +QBE=$DIR/../qbe + +usage() +{ + echo "usage: mcc [LDFLAGS] file.c" >&2 + exit 1 +} + +for i +do + case $i in + -*) + flags="$flags $i" + ;; + *) + if ! test -z $file + then + usage + fi + file=$i + ;; + esac +done + +if test -z $file +then + usage +fi + + +$DIR/minic < $file > /tmp/minic.ssa && +$QBE < /tmp/minic.ssa > /tmp/minic.s && +cc /tmp/minic.s $flags + +if test $? -ne 0 +then + echo "error processing file $file" >&2 + exit 1 +fi + + diff --git a/src/qbe/minic/minic.y b/src/qbe/minic/minic.y new file mode 100644 index 00000000..448f38a9 --- /dev/null +++ b/src/qbe/minic/minic.y @@ -0,0 +1,951 @@ +%{ + +#include +#include +#include +#include + +enum { + NString = 32, + NGlo = 256, + NVar = 512, + NStr = 256, +}; + +enum { /* minic types */ + NIL, + INT, + LNG, + PTR, + FUN, +}; + +#define IDIR(x) (((x) << 3) + PTR) +#define FUNC(x) (((x) << 3) + FUN) +#define DREF(x) ((x) >> 3) +#define KIND(x) ((x) & 7) +#define SIZE(x) \ + (x == NIL ? (die("void has no size"), 0) : \ + x == INT ? 
4 : 8) + +typedef struct Node Node; +typedef struct Symb Symb; +typedef struct Stmt Stmt; + +struct Symb { + enum { + Con, + Tmp, + Var, + Glo, + } t; + union { + int n; + char v[NString]; + } u; + unsigned long ctyp; +}; + +struct Node { + char op; + union { + int n; + char v[NString]; + Symb s; + } u; + Node *l, *r; +}; + +struct Stmt { + enum { + If, + While, + Seq, + Expr, + Break, + Ret, + } t; + void *p1, *p2, *p3; +}; + +int yylex(void), yyerror(char *); +Symb expr(Node *), lval(Node *); +void branch(Node *, int, int); + +FILE *of; +int line; +int lbl, tmp, nglo; +char *ini[NGlo]; +struct { + char v[NString]; + unsigned ctyp; + int glo; +} varh[NVar]; + +void +die(char *s) +{ + fprintf(stderr, "error:%d: %s\n", line, s); + exit(1); +} + +void * +alloc(size_t s) +{ + void *p; + + p = malloc(s); + if (!p) + die("out of memory"); + return p; +} + +unsigned +hash(char *s) +{ + unsigned h; + + h = 42; + while (*s) + h += 11 * h + *s++; + return h % NVar; +} + +void +varclr() +{ + unsigned h; + + for (h=0; ht = Tmp; + s->ctyp = LNG; + s->u.n = tmp++; +} + +unsigned +prom(int op, Symb *l, Symb *r) +{ + Symb *t; + int sz; + + if (l->ctyp == r->ctyp && KIND(l->ctyp) != PTR) + return l->ctyp; + + if (l->ctyp == LNG && r->ctyp == INT) { + sext(r); + return LNG; + } + if (l->ctyp == INT && r->ctyp == LNG) { + sext(l); + return LNG; + } + + if (op == '+') { + if (KIND(r->ctyp) == PTR) { + t = l; + l = r; + r = t; + } + if (KIND(r->ctyp) == PTR) + die("pointers added"); + goto Scale; + } + + if (op == '-') { + if (KIND(l->ctyp) != PTR) + die("pointer substracted from integer"); + if (KIND(r->ctyp) != PTR) + goto Scale; + if (l->ctyp != r->ctyp) + die("non-homogeneous pointers in substraction"); + return LNG; + } + +Scale: + sz = SIZE(DREF(l->ctyp)); + if (r->t == Con) + r->u.n *= sz; + else { + if (irtyp(r->ctyp) != 'l') + sext(r); + fprintf(of, "\t%%t%d =l mul %d, ", tmp, sz); + psymb(*r); + fprintf(of, "\n"); + r->u.n = tmp++; + } + return l->ctyp; +} + +void +load(Symb 
d, Symb s) +{ + char t; + + fprintf(of, "\t"); + psymb(d); + t = irtyp(d.ctyp); + fprintf(of, " =%c load%c ", t, t); + psymb(s); + fprintf(of, "\n"); +} + +void +call(Node *n, Symb *sr) +{ + Node *a; + char *f; + unsigned ft; + + f = n->l->u.v; + if (varget(f)) { + ft = varget(f)->ctyp; + if (KIND(ft) != FUN) + die("invalid call"); + } else + ft = FUNC(INT); + sr->ctyp = DREF(ft); + for (a=n->r; a; a=a->r) + a->u.s = expr(a->l); + fprintf(of, "\t"); + psymb(*sr); + fprintf(of, " =%c call $%s(", irtyp(sr->ctyp), f); + for (a=n->r; a; a=a->r) { + fprintf(of, "%c ", irtyp(a->u.s.ctyp)); + psymb(a->u.s); + fprintf(of, ", "); + } + fprintf(of, "...)\n"); +} + +Symb +expr(Node *n) +{ + static char *otoa[] = { + ['+'] = "add", + ['-'] = "sub", + ['*'] = "mul", + ['/'] = "div", + ['%'] = "rem", + ['&'] = "and", + ['<'] = "cslt", /* meeeeh, wrong for pointers! */ + ['l'] = "csle", + ['e'] = "ceq", + ['n'] = "cne", + }; + Symb sr, s0, s1, sl; + int o, l; + char ty[2]; + + sr.t = Tmp; + sr.u.n = tmp++; + + switch (n->op) { + + case 0: + abort(); + + case 'o': + case 'a': + l = lbl; + lbl += 3; + branch(n, l, l+1); + fprintf(of, "@l%d\n", l); + fprintf(of, "\tjmp @l%d\n", l+2); + fprintf(of, "@l%d\n", l+1); + fprintf(of, "\tjmp @l%d\n", l+2); + fprintf(of, "@l%d\n", l+2); + fprintf(of, "\t"); + sr.ctyp = INT; + psymb(sr); + fprintf(of, " =w phi @l%d 1, @l%d 0\n", l, l+1); + break; + + case 'V': + s0 = lval(n); + sr.ctyp = s0.ctyp; + load(sr, s0); + break; + + case 'N': + sr.t = Con; + sr.u.n = n->u.n; + sr.ctyp = INT; + break; + + case 'S': + sr.t = Glo; + sr.u.n = n->u.n; + sr.ctyp = IDIR(INT); + break; + + case 'C': + call(n, &sr); + break; + + case '@': + s0 = expr(n->l); + if (KIND(s0.ctyp) != PTR) + die("dereference of a non-pointer"); + sr.ctyp = DREF(s0.ctyp); + load(sr, s0); + break; + + case 'A': + sr = lval(n->l); + sr.ctyp = IDIR(sr.ctyp); + break; + + case '=': + s0 = expr(n->r); + s1 = lval(n->l); + sr = s0; + if (s1.ctyp == LNG && s0.ctyp == INT) + sext(&s0); + 
if (s0.ctyp != IDIR(NIL) || KIND(s1.ctyp) != PTR) + if (s1.ctyp != IDIR(NIL) || KIND(s0.ctyp) != PTR) + if (s1.ctyp != s0.ctyp) + die("invalid assignment"); + fprintf(of, "\tstore%c ", irtyp(s1.ctyp)); + goto Args; + + case 'P': + case 'M': + o = n->op == 'P' ? '+' : '-'; + sl = lval(n->l); + s0.t = Tmp; + s0.u.n = tmp++; + s0.ctyp = sl.ctyp; + load(s0, sl); + s1.t = Con; + s1.u.n = 1; + s1.ctyp = INT; + goto Binop; + + default: + s0 = expr(n->l); + s1 = expr(n->r); + o = n->op; + Binop: + sr.ctyp = prom(o, &s0, &s1); + if (strchr("neop)) { + sprintf(ty, "%c", irtyp(sr.ctyp)); + sr.ctyp = INT; + } else + strcpy(ty, ""); + fprintf(of, "\t"); + psymb(sr); + fprintf(of, " =%c", irtyp(sr.ctyp)); + fprintf(of, " %s%s ", otoa[o], ty); + Args: + psymb(s0); + fprintf(of, ", "); + psymb(s1); + fprintf(of, "\n"); + break; + + } + if (n->op == '-' + && KIND(s0.ctyp) == PTR + && KIND(s1.ctyp) == PTR) { + fprintf(of, "\t%%t%d =l div ", tmp); + psymb(sr); + fprintf(of, ", %d\n", SIZE(DREF(s0.ctyp))); + sr.u.n = tmp++; + } + if (n->op == 'P' || n->op == 'M') { + fprintf(of, "\tstore%c ", irtyp(sl.ctyp)); + psymb(sr); + fprintf(of, ", "); + psymb(sl); + fprintf(of, "\n"); + sr = s0; + } + return sr; +} + +Symb +lval(Node *n) +{ + Symb sr; + + switch (n->op) { + default: + die("invalid lvalue"); + case 'V': + if (!varget(n->u.v)) + die("undefined variable"); + sr = *varget(n->u.v); + break; + case '@': + sr = expr(n->l); + if (KIND(sr.ctyp) != PTR) + die("dereference of a non-pointer"); + sr.ctyp = DREF(sr.ctyp); + break; + } + return sr; +} + +void +branch(Node *n, int lt, int lf) +{ + Symb s; + int l; + + switch (n->op) { + default: + s = expr(n); /* TODO: insert comparison to 0 with proper type */ + fprintf(of, "\tjnz "); + psymb(s); + fprintf(of, ", @l%d, @l%d\n", lt, lf); + break; + case 'o': + l = lbl; + lbl += 1; + branch(n->l, lt, l); + fprintf(of, "@l%d\n", l); + branch(n->r, lt, lf); + break; + case 'a': + l = lbl; + lbl += 1; + branch(n->l, l, lf); + fprintf(of, 
"@l%d\n", l); + branch(n->r, lt, lf); + break; + } +} + +int +stmt(Stmt *s, int b) +{ + int l, r; + Symb x; + + if (!s) + return 0; + + switch (s->t) { + case Ret: + x = expr(s->p1); + fprintf(of, "\tret "); + psymb(x); + fprintf(of, "\n"); + return 1; + case Break: + if (b < 0) + die("break not in loop"); + fprintf(of, "\tjmp @l%d\n", b); + return 1; + case Expr: + expr(s->p1); + return 0; + case Seq: + return stmt(s->p1, b) || stmt(s->p2, b); + case If: + l = lbl; + lbl += 3; + branch(s->p1, l, l+1); + fprintf(of, "@l%d\n", l); + if (!(r=stmt(s->p2, b))) + if (s->p3) + fprintf(of, "\tjmp @l%d\n", l+2); + fprintf(of, "@l%d\n", l+1); + if (s->p3) + if (!(r &= stmt(s->p3, b))) + fprintf(of, "@l%d\n", l+2); + return s->p3 && r; + case While: + l = lbl; + lbl += 3; + fprintf(of, "@l%d\n", l); + branch(s->p1, l+1, l+2); + fprintf(of, "@l%d\n", l+1); + if (!stmt(s->p2, l+2)) + fprintf(of, "\tjmp @l%d\n", l); + fprintf(of, "@l%d\n", l+2); + return 0; + } +} + +Node * +mknode(char op, Node *l, Node *r) +{ + Node *n; + + n = alloc(sizeof *n); + n->op = op; + n->l = l; + n->r = r; + return n; +} + +Node * +mkidx(Node *a, Node *i) +{ + Node *n; + + n = mknode('+', a, i); + n = mknode('@', n, 0); + return n; +} + +Node * +mkneg(Node *n) +{ + static Node *z; + + if (!z) { + z = mknode('N', 0, 0); + z->u.n = 0; + } + return mknode('-', z, n); +} + +Stmt * +mkstmt(int t, void *p1, void *p2, void *p3) +{ + Stmt *s; + + s = alloc(sizeof *s); + s->t = t; + s->p1 = p1; + s->p2 = p2; + s->p3 = p3; + return s; +} + +Node * +param(char *v, unsigned ctyp, Node *pl) +{ + Node *n; + + if (ctyp == NIL) + die("invalid void declaration"); + n = mknode(0, 0, pl); + varadd(v, 0, ctyp); + strcpy(n->u.v, v); + return n; +} + +Stmt * +mkfor(Node *ini, Node *tst, Node *inc, Stmt *s) +{ + Stmt *s1, *s2; + + if (ini) + s1 = mkstmt(Expr, ini, 0, 0); + else + s1 = 0; + if (inc) { + s2 = mkstmt(Expr, inc, 0, 0); + s2 = mkstmt(Seq, s, s2, 0); + } else + s2 = s; + if (!tst) { + tst = mknode('N', 0, 0); + 
tst->u.n = 1; + } + s2 = mkstmt(While, tst, s2, 0); + if (s1) + return mkstmt(Seq, s1, s2, 0); + else + return s2; +} + +%} + +%union { + Node *n; + Stmt *s; + unsigned u; +} + +%token NUM +%token STR +%token IDENT +%token PP MM LE GE SIZEOF + +%token TVOID TINT TLNG +%token IF ELSE WHILE FOR BREAK RETURN + +%right '=' +%left OR +%left AND +%left '&' +%left EQ NE +%left '<' '>' LE GE +%left '+' '-' +%left '*' '/' '%' + +%type type +%type stmt stmts +%type expr exp0 pref post arg0 arg1 par0 par1 + +%% + +prog: func prog | fdcl prog | idcl prog | ; + +fdcl: type IDENT '(' ')' ';' +{ + varadd($2->u.v, 1, FUNC($1)); +}; + +idcl: type IDENT ';' +{ + if ($1 == NIL) + die("invalid void declaration"); + if (nglo == NGlo) + die("too many string literals"); + ini[nglo] = alloc(sizeof "{ x 0 }"); + sprintf(ini[nglo], "{ %c 0 }", irtyp($1)); + varadd($2->u.v, nglo++, $1); +}; + +init: +{ + varclr(); + tmp = 0; +}; + +func: init prot '{' dcls stmts '}' +{ + if (!stmt($5, -1)) + fprintf(of, "\tret 0\n"); + fprintf(of, "}\n\n"); +}; + +prot: IDENT '(' par0 ')' +{ + Symb *s; + Node *n; + int t, m; + + varadd($1->u.v, 1, FUNC(INT)); + fprintf(of, "export function w $%s(", $1->u.v); + n = $3; + if (n) + for (;;) { + s = varget(n->u.v); + fprintf(of, "%c ", irtyp(s->ctyp)); + fprintf(of, "%%t%d", tmp++); + n = n->r; + if (n) + fprintf(of, ", "); + else + break; + } + fprintf(of, ") {\n"); + fprintf(of, "@l%d\n", lbl++); + for (t=0, n=$3; n; t++, n=n->r) { + s = varget(n->u.v); + m = SIZE(s->ctyp); + fprintf(of, "\t%%%s =l alloc%d %d\n", n->u.v, m, m); + fprintf(of, "\tstore%c %%t%d", irtyp(s->ctyp), t); + fprintf(of, ", %%%s\n", n->u.v); + } +}; + +par0: par1 + | { $$ = 0; } + ; +par1: type IDENT ',' par1 { $$ = param($2->u.v, $1, $4); } + | type IDENT { $$ = param($2->u.v, $1, 0); } + ; + + +dcls: | dcls type IDENT ';' +{ + int s; + char *v; + + if ($2 == NIL) + die("invalid void declaration"); + v = $3->u.v; + s = SIZE($2); + varadd(v, 0, $2); + fprintf(of, "\t%%%s =l alloc%d 
%d\n", v, s, s); +}; + +type: type '*' { $$ = IDIR($1); } + | TINT { $$ = INT; } + | TLNG { $$ = LNG; } + | TVOID { $$ = NIL; } + ; + +stmt: ';' { $$ = 0; } + | '{' stmts '}' { $$ = $2; } + | BREAK ';' { $$ = mkstmt(Break, 0, 0, 0); } + | RETURN expr ';' { $$ = mkstmt(Ret, $2, 0, 0); } + | expr ';' { $$ = mkstmt(Expr, $1, 0, 0); } + | WHILE '(' expr ')' stmt { $$ = mkstmt(While, $3, $5, 0); } + | IF '(' expr ')' stmt ELSE stmt { $$ = mkstmt(If, $3, $5, $7); } + | IF '(' expr ')' stmt { $$ = mkstmt(If, $3, $5, 0); } + | FOR '(' exp0 ';' exp0 ';' exp0 ')' stmt + { $$ = mkfor($3, $5, $7, $9); } + ; + +stmts: stmts stmt { $$ = mkstmt(Seq, $1, $2, 0); } + | { $$ = 0; } + ; + +expr: pref + | expr '=' expr { $$ = mknode('=', $1, $3); } + | expr '+' expr { $$ = mknode('+', $1, $3); } + | expr '-' expr { $$ = mknode('-', $1, $3); } + | expr '*' expr { $$ = mknode('*', $1, $3); } + | expr '/' expr { $$ = mknode('/', $1, $3); } + | expr '%' expr { $$ = mknode('%', $1, $3); } + | expr '<' expr { $$ = mknode('<', $1, $3); } + | expr '>' expr { $$ = mknode('<', $3, $1); } + | expr LE expr { $$ = mknode('l', $1, $3); } + | expr GE expr { $$ = mknode('l', $3, $1); } + | expr EQ expr { $$ = mknode('e', $1, $3); } + | expr NE expr { $$ = mknode('n', $1, $3); } + | expr '&' expr { $$ = mknode('&', $1, $3); } + | expr AND expr { $$ = mknode('a', $1, $3); } + | expr OR expr { $$ = mknode('o', $1, $3); } + ; + +exp0: expr + | { $$ = 0; } + ; + +pref: post + | '-' pref { $$ = mkneg($2); } + | '*' pref { $$ = mknode('@', $2, 0); } + | '&' pref { $$ = mknode('A', $2, 0); } + ; + +post: NUM + | STR + | IDENT + | SIZEOF '(' type ')' { $$ = mknode('N', 0, 0); $$->u.n = SIZE($3); } + | '(' expr ')' { $$ = $2; } + | IDENT '(' arg0 ')' { $$ = mknode('C', $1, $3); } + | post '[' expr ']' { $$ = mkidx($1, $3); } + | post PP { $$ = mknode('P', $1, 0); } + | post MM { $$ = mknode('M', $1, 0); } + ; + +arg0: arg1 + | { $$ = 0; } + ; +arg1: expr { $$ = mknode(0, $1, 0); } + | expr ',' arg1 { $$ = 
mknode(0, $1, $3); } + ; + +%% + +int +yylex() +{ + struct { + char *s; + int t; + } kwds[] = { + { "void", TVOID }, + { "int", TINT }, + { "long", TLNG }, + { "if", IF }, + { "else", ELSE }, + { "for", FOR }, + { "while", WHILE }, + { "return", RETURN }, + { "break", BREAK }, + { "sizeof", SIZEOF }, + { 0, 0 } + }; + int i, c, c1, n; + char v[NString], *p; + + do { + c = getchar(); + if (c == '#') + while ((c = getchar()) != '\n') + ; + if (c == '\n') + line++; + } while (isspace(c)); + + + if (c == EOF) + return 0; + + + if (isdigit(c)) { + n = 0; + do { + n *= 10; + n += c-'0'; + c = getchar(); + } while (isdigit(c)); + ungetc(c, stdin); + yylval.n = mknode('N', 0, 0); + yylval.n->u.n = n; + return NUM; + } + + if (isalpha(c)) { + p = v; + do { + if (p == &v[NString-1]) + die("ident too long"); + *p++ = c; + c = getchar(); + } while (isalpha(c) || c == '_'); + *p = 0; + ungetc(c, stdin); + for (i=0; kwds[i].s; i++) + if (strcmp(v, kwds[i].s) == 0) + return kwds[i].t; + yylval.n = mknode('V', 0, 0); + strcpy(yylval.n->u.v, v); + return IDENT; + } + + if (c == '"') { + i = 0; + n = 32; + p = alloc(n); + strcpy(p, "{ b \""); + for (i=5;; i++) { + c = getchar(); + if (c == EOF) + die("unclosed string literal"); + if (i+8 >= n) { + p = memcpy(alloc(n*2), p, n); + n *= 2; + } + p[i] = c; + if (c == '"' && p[i-1]!='\\') + break; + } + strcpy(&p[i], "\", b 0 }"); + if (nglo == NGlo) + die("too many globals"); + ini[nglo] = p; + yylval.n = mknode('S', 0, 0); + yylval.n->u.n = nglo++; + return STR; + } + + c1 = getchar(); +#define DI(a, b) a + b*256 + switch (DI(c,c1)) { + case DI('!','='): return NE; + case DI('=','='): return EQ; + case DI('<','='): return LE; + case DI('>','='): return GE; + case DI('+','+'): return PP; + case DI('-','-'): return MM; + case DI('&','&'): return AND; + case DI('|','|'): return OR; + } +#undef DI + ungetc(c1, stdin); + + return c; +} + +int +yyerror(char *err) +{ + die("parse error"); + return 0; +} + +int +main() +{ + int i; + + of = 
stdout; + nglo = 1; + if (yyparse() != 0) + die("parse error"); + for (i=1; i cmax) + cmax = c; + } + printf("should print 178: %d\n", cmax); +} diff --git a/src/qbe/minic/test/euler9.c b/src/qbe/minic/test/euler9.c new file mode 100644 index 00000000..ec85b863 --- /dev/null +++ b/src/qbe/minic/test/euler9.c @@ -0,0 +1,27 @@ +#include + +main() +{ + int i; + int a; + int b; + int c; + int d; + + for (a = 1; a < 1000; a++) { + for (b = a + 1; b < 1000; b++) { + d = a*a + b*b; + for (i = 0; i < 1000; i++) { + if (i * i == d) { + c = i; + if (b < c && a+b+c == 1000) { + printf("%d\n", a*b*c); + return 0; + } + break; + } + } + } + } +} + diff --git a/src/qbe/minic/test/knight.c b/src/qbe/minic/test/knight.c new file mode 100644 index 00000000..273e651e --- /dev/null +++ b/src/qbe/minic/test/knight.c @@ -0,0 +1,60 @@ +#include +#include + +void *calloc(); + +int N; +int **b; + +board() +{ + int x; + int y; + + for (y=0; y<8; y++) { + for (x=0; x<8; x++) + printf(" %02d", b[x][y]); + printf("\n"); + } + printf("\n"); + return 0; +} + +chk(int x, int y) +{ + if (x < 0 || x > 7 || y < 0 || y > 7) + return 0; + return b[x][y] == 0; +} + +go(int k, int x, int y) +{ + int i; + int j; + + b[x][y] = k; + if (k == 64) { + if (x != 2 && y != 0 && abs(x-2) + abs(y) == 3) { + board(); + N++; + if (N == 10) + exit(0); + } + } else + for (i=-2; i<=2; i++) + for (j=-2; j<=2; j++) + if (abs(i) + abs(j) == 3 && chk(x+i, y+j)) + go(k+1, x+i, y+j); + b[x][y] = 0; + return 0; +} + +main() +{ + int i; + + b = calloc(8, sizeof (int *)); + for (i=0; i<8; i++) + b[i] = calloc(8, sizeof (int)); + go(1, 2, 0); +} diff --git a/src/qbe/minic/test/mandel.c b/src/qbe/minic/test/mandel.c new file mode 100644 index 00000000..68049a3b --- /dev/null +++ b/src/qbe/minic/test/mandel.c @@ -0,0 +1,88 @@ +void *malloc(); +void *SDL_CreateWindow(); +void *SDL_CreateRenderer(); +int SDL_SetRenderDrawColor(); +int SDL_RenderDrawPoint(); +int SDL_RenderClear(); +int SDL_RenderPresent(); +int SDL_PollEvent(); 
+int SDL_DestroyRenderer(); +int SDL_DestroyWindow(); +int SDL_Quit(); +int SDL_Init(); + +void *win; +void *rnd; +int W; +int H; +int *col; + +plot(int x, int y) +{ + int n; + int fx; + int fy; + int zx; + int zy; + int nx; + int ny; + + fx = (x - W/2)*4000 / W; + fy = (y - H/2)*4000 / H; + zx = fx; + zy = fy; + + for (n=0; n<200; n++) { + if (zx*zx + zy*zy > 4000000) + break; + nx = (zx*zx)/1000 - (zy*zy)/1000 + fx; + ny = zx*zy/500 + fy; + zx = nx; + zy = ny; + } + n = col[n]; + SDL_SetRenderDrawColor(rnd, 100, n, n, 255); + SDL_RenderDrawPoint(rnd, x, y); + return 0; +} + +main() { + int c; + int n; + int x; + int y; + void *e; + int *ie; + + W = 800; + H = 800; + SDL_Init(32); + win = SDL_CreateWindow("Mandelbrot MiniC", 0, 0, W, H, 0); + rnd = SDL_CreateRenderer(win, -1, 0); + e = malloc(56); + ie = e; + col = malloc(201 * sizeof (int)); + c = 20; + for (n=0; n<200; n++) { + col[n] = c; + c = c + (255-c)/8; + } + col[n] = 30; + + SDL_RenderClear(rnd); + for (x=0; x + +main() { + int n; + int t; + int c; + int p; + + c = 0; + n = 2; + while (n < 5000) { + t = 2; + p = 1; + while (t*t <= n) { + if (n % t == 0) + p = 0; + t++; + } + if (p) { + if (c && c % 10 == 0) + printf("\n"); + printf("%4d ", n); + c++; + } + n++; + } + printf("\n"); +} diff --git a/src/qbe/minic/test/queen.c b/src/qbe/minic/test/queen.c new file mode 100644 index 00000000..7185321b --- /dev/null +++ b/src/qbe/minic/test/queen.c @@ -0,0 +1,70 @@ +int printf(); +void *calloc(); +int atoi(); + +int Q; +int N; +int **t; + +print() { + int x; + int y; + + for (y=0; y= 0) + r = r + t[x+i][y-i]; + if (x-i >= 0 & y+i < Q) + r = r + t[x-i][y+i]; + if (x-i >= 0 & y-i >= 0) + r = r + t[x-i][y-i]; + } + return r; +} + +go(int y) { + int x; + + if (y == Q) { + print(); + N++; + return 0; + } + for (x=0; x= 2) + Q = atoi(av[1]); + t = calloc(Q, sizeof(int *)); + for (i=0; i +#include +#include +#include +#include + +typedef int Sym; +typedef struct Rule Rule; +typedef struct TSet TSet; +typedef struct 
Info Info; +typedef struct Term Term; +typedef struct Item Item; +typedef struct Row Row; + +#define S ((Sym) -1) +#define Red(n) (- (n+2)) /* involutive, Red(Red(x)) == x */ +#define GetBit(s,n) (s[n/32] & (1<<(n%32))) +#define SetBit(s,n) (s[n/32] |= 1<<(n%32)) + +enum { + IdntSz = 64, + MaxRhs = 32, + MaxTk = 500, + MaxNt = 500, + MaxRl = 800, + MaxTm = 1000, + + TSetSz = (MaxTk+31)/32, + Sym0 = MaxTk +}; + +struct Rule { + Sym lhs; + Sym rhs[MaxRhs]; + char *act; + int actln; + int prec; +}; + +struct TSet { + unsigned t[TSetSz]; +}; + +struct Info { + int nul; + TSet fst; + int prec; + enum { + ANone, + ALeft, + ARight, + ANonassoc + } assoc; + char name[IdntSz]; + char type[IdntSz]; +}; + +struct Term { + Rule *rule; + int dot; + TSet lk; +}; + +struct Item { + int id; + int nt; + Term ts[MaxTm]; + Item **gtbl; + int dirty; +}; + +struct Row { + int def; + int ndef; + int *t; +}; + +char srs[] = "shift/reduce conflict state %d token %s\n"; +char rrs[] = "reduce/reduce conflict state %d token %s\n"; + +Item i0; /* temporary item */ + +int nrl, nsy, nst, ntk; +Rule rs[MaxRl]; /* grammar rules (ordered, rcmp) */ +Info is[MaxTk+MaxNt]; /* symbol information */ +Item **st; /* LALR(1) states (ordered, icmp) */ +Row *as; /* action table [state][tok] */ +Row *gs; /* goto table [sym][state] */ +Sym sstart;/* start symbol */ +Item *ini; /* initial state */ +int doty; /* type-checking enabled */ + +int srconf, rrconf; +int actsz; +int *act; +int *chk; +int *adsp; +int *gdsp; + +int lineno = 1; +char *srca; +FILE *fin; +FILE *fout; +FILE *fgrm; +FILE *fhdr; + +void +die(char *s) +{ + fprintf(stderr, "%s (on line %d)\n", s, lineno); + exit(1); +} + +void * +yalloc(size_t n, size_t o) +{ + void *p; + + p = calloc(n, o); + if (!p) + die("out of memory"); + return p; +} + +int +rcmp(const void *a, const void *b) +{ + return ((Rule *)a)->lhs - ((Rule *)b)->lhs; +} + +Rule * +rfind(Sym lhs) +{ + Rule *r; + Rule k; + + k.lhs = lhs; + r = bsearch(&k, rs, nrl, sizeof *r, rcmp); + 
if (r != 0) + while (r > rs && r[-1].lhs == lhs) + r--; + return r; +} + +int +slen(Sym *l) +{ + int n; + + for (n=0; *l!=S; n++, l++); + return n; +} + +void +tszero(TSet *ts) +{ + memset(ts, 0, sizeof *ts); +} + +int +tsunion(TSet *tsa, TSet *tsb) +{ + int n; + unsigned *a, *b, c, t; + + c = 0; + a = tsa->t; + b = tsb->t; + n = (31+ntk)/32; + while (n-- > 0) { + t = *a; + *a |= *b++; + c |= t ^ *a++; + } + return !!c; +} + +void +first(TSet *ts, Sym *stnc, TSet *last) +{ + Sym f; + + f = stnc[0]; + if (f == S) { + if (last) + tsunion(ts, last); + return; + } + if (f < ntk) { + SetBit(ts->t, f); + return; + } + if (is[f].nul) + first(ts, stnc+1, last); + tsunion(ts, &is[f].fst); +} + +void +ginit() +{ + int chg; + Rule *r; + Info *i; + Sym *s; + TSet ts; + + do { + chg = 0; + for (r=rs; r-rslhs]; + for (s=r->rhs; *s!=S; s++) + if (!is[*s].nul) + goto nonul; + chg |= i->nul == 0; + i->nul = 1; + nonul: + tszero(&ts); + first(&ts, r->rhs, 0); + chg |= tsunion(&i->fst, &ts); + } + } while (chg); +} + +int +tcmp(Term *a, Term *b) +{ + int c; + + c = a->rule - b->rule; + if (c==0) + c = a->dot - b->dot; + return c; +} + +int +tcmpv(const void *a, const void *b) +{ + return tcmp((Term *)a, (Term *)b); +} + +void +iclose(Item *i) +{ + int smap[MaxNt]; + Rule *r; + Term *t, t1; + Sym s, *rem; + int chg, n, m; + + t1.dot = 0; + memset(smap, 0, sizeof smap); + for (n=0; nnt; n++) { + t = &i->ts[n]; + s = t->rule->lhs-Sym0; + if (t->dot==0) + if (smap[s]==0) + smap[s] = n; + } + do { + chg = 0; + for (n=0; nnt; n++) { + t = &i->ts[n]; + rem = &t->rule->rhs[t->dot]; + s = *rem++; + if (s < Sym0 || s == S) + continue; + r = rfind(s); + if (!r) + die("some non-terminals are not defined"); + tszero(&t1.lk); + first(&t1.lk, rem, &t->lk); + m = smap[s-Sym0]; + if (m) + for (; r-rslhs==s; r++, m++) + chg |= tsunion(&i->ts[m].lk, &t1.lk); + else { + m = i->nt; + smap[s-Sym0] = m; + for (; r-rslhs==s; r++, m++) { + if (m>=MaxTm) + die("too many terms in item"); + t1.rule = r; + 
i->ts[m] = t1; + } + i->nt = m; + chg = 1; + } + } + } while (chg); +} + +void +igoto(Item *i, Sym s) +{ + Term *t, *t1; + int n; + + i0.nt = 0; + for (n=0, t=i->ts; nnt; n++, t++) { + if (t->rule->rhs[t->dot] != s) + continue; + t1 = &i0.ts[i0.nt++]; + *t1 = *t; + t1->dot++; + } + qsort(i0.ts, i0.nt, sizeof i0.ts[0], tcmpv); +} + +int +icmp(Item *a, Item *b) +{ + Term *ta, *tb, *ma, *mb; + int c; + + ta = a->ts; + tb = b->ts; + ma = ta+a->nt; + mb = tb+b->nt; + for (;;) { + if (ta==ma || ta->dot==0) + return -(tbdot); + if (tb==mb || tb->dot==0) + return +(tadot); + if ((c=tcmp(ta++, tb++))) + return c; + } +} + +int +stadd(Item **pi) +{ + Item *i, *i1; + int lo, hi, mid, n, chg; + + /* http://www.iq0.com/duffgram/bsearch.c */ + i = *pi; + lo = 0; + hi = nst - 1; + if (hi<0 || icmp(i, st[hi])>0) + hi++; + else if (icmp(i, st[lo])<=0) + hi = lo; + else + while (hi-lo!=1) { + mid = (lo+hi)/2; + if (icmp(st[mid], i)<0) + lo = mid; + else + hi = mid; + } + if (hint; n++) + chg |= tsunion(&i1->ts[n].lk, &i->ts[n].lk); + i1->dirty |= chg; + *pi = i1; + return chg; + } else { + st = realloc(st, ++nst * sizeof st[0]); + if (!st) + die("out of memory"); + memmove(&st[hi+1], &st[hi], (nst-1 - hi) * sizeof st[0]); + i->gtbl = yalloc(nsy, sizeof i->gtbl[0]); + i->dirty = 1; + i1 = yalloc(1, sizeof *i1); + *i1 = *i; + *pi = st[hi] = i1; + return 1; + } +} + +void +stgen() +{ + Sym s; + Rule *r; + Item *i, *i1; + Term tini; + int n, chg; + + ini = &i0; + r = rfind(Sym0); + tini.rule = r; + tini.dot = 0; + tszero(&tini.lk); + SetBit(tini.lk.t, 0); + i0.nt = 0; + i0.ts[i0.nt++] = tini; + stadd(&ini); + do { + chg = 0; + for (n=0; ndirty) + continue; + i->dirty = 0; + iclose(i); + for (s=0; snt) { + i->gtbl[s] = 0; + continue; + } + chg |= stadd(&i1); + i->gtbl[s] = i1; + } + } + } while (chg); +} + +int +resolve(Rule *r, Sym s, int st) +{ + if (!r->prec || !is[s].prec) { + conflict: + if (fgrm) + fprintf(fgrm, srs, st, is[s].name); + srconf++; + return ARight; + } + if 
(r->prec==is[s].prec) { + if (is[s].assoc == ANone) + goto conflict; + return is[s].assoc; + } else + if (r->precrule->rhs[t->dot]; + if (s!=S) { + /* shift */ + if (s>=ntk) + return; + assert(i->gtbl[s]); + act = ARight; + if (tbl[s] && tbl[s] != i->gtbl[s]->id) { + assert(tbl[s]<=0); + act = resolve(&rs[Red(tbl[s])], s, i->id-1); + } + switch (act) { + case ARight: + tbl[s] = i->gtbl[s]->id; + break; + case ANonassoc: + tbl[s] = -1; + break; + } + } else + /* reduce */ + for (s=0; slk.t, s)) + continue; + /* default to shift if conflict occurs */ + if (!tbl[s]) + act = ALeft; + else if (tbl[s]<0) { + if (fgrm) + fprintf(fgrm, rrs, i->id-1, is[s].name); + rrconf++; + act = ARight; + } else + act = resolve(t->rule, s, i->id-1); + switch (act) { + case ALeft: + tbl[s] = Red(t->rule-rs); + break; + case ANonassoc: + tbl[s] = -1; + break; + } + } +} + +void +setdef(Row *r, int w, int top) +{ + int n, m, x, cnt, def, max; + + max = 0; + def = -1; + r->ndef = 0; + for (n=0; nt[n]; + if (x==0) + r->ndef++; + if (x>=top || x==0) + continue; + cnt = 1; + for (m=n+1; mt[m]==x) + cnt++; + if (cnt>max) { + def = x; + max = cnt; + } + } + r->def = def; + if (max!=0) + /* zero out the most frequent entry */ + for (n=0; nt[n]==def) { + r->t[n] = 0; + r->ndef++; + } +} + +void +tblgen() +{ + Row *r; + Item *i; + int n, m; + + for (n=0; nid = n+1; + as = yalloc(nst, sizeof as[0]); + gs = yalloc(nsy-MaxTk, sizeof gs[0]); + /* fill action table */ + for (n=0; nt = yalloc(ntk, sizeof r->t[0]); + for (i=st[n], m=0; mnt; m++) + tblset(r->t, i, &i->ts[m]); + setdef(r, ntk, -1); + r->def = Red(r->def); /* Red(-1) == -1 */ + } + /* fill goto table */ + for (n=MaxTk; nt = yalloc(nst, sizeof r->t[0]); + for (m=0; mgtbl[n]) + r->t[m] = st[m]->gtbl[n]->id; + setdef(r, nst, nst+1); + } +} + +int +prcmp(const void *a, const void *b) +{ + return (*(Row **)a)->ndef - (*(Row **)b)->ndef; +} + +void +actgen() +{ + Row **o, *r; + int n, m, t, dsp, nnt; + + actsz = 0; + o = yalloc(nst+nsy, sizeof 
o[0]); + act = yalloc(nst*nsy, sizeof act[0]); + chk = yalloc(nst*nsy, sizeof chk[0]); + adsp = yalloc(nst, sizeof adsp[0]); + for (n=0; nt[m]==0; m++) + dsp--; + retrya: + /* The invariant here is even + * trickier than it looks. + */ + for (t=0; t=0 && chk[m]>=0) + if ((r->t[t] && (chk[m]!=t || act[m]!=r->t[t])) + || (!r->t[t] && chk[m]==t)) { + dsp++; + goto retrya; + } + adsp[r-as] = dsp; + for (t=0; tt[t]) { + chk[dsp+t] = t; + act[dsp+t] = r->t[t]; + if (dsp+t>=actsz) + actsz = dsp+t+1; + } + } + /* fill in gotos */ + nnt = nsy-MaxTk; + gdsp = yalloc(nnt, sizeof gdsp[0]); + for (n=0; nt[m]==0; m++) + dsp--; + retryg: + for (t=m; t=0 && r->t[t]) { + dsp++; + goto retryg; + } + gdsp[r-gs] = dsp; + for (t=m; tt[t]) { + chk[dsp+t] = ntk+(r-gs); + act[dsp+t] = r->t[t]; + if (dsp+t>=actsz) + actsz = dsp+t+1; + } + } + free(o); +} + +void +aout(char *name, int *t, int n) +{ + int i; + + fprintf(fout, "short %s[] = {", name); + for (i=0; iid-1); + fprintf(fout, "short yyntoks = %d;\n", ntk); + o = yalloc(nrl+nst+nsy, sizeof o[0]); + for (n=0; n0 || o[n]==-1); + if (o[n]>0) + o[n]--; + } + aout("yygdef", o, nsy-MaxTk); + aout("yyadsp", adsp, nst); + aout("yygdsp", gdsp, nsy-MaxTk); + for (n=0; n=0) + act[n]--; + aout("yyact", act, actsz); + aout("yychk", chk, actsz); + for (n=0; n<128; n++) { + o[n] = 0; + for (m=0; m", (int)(r-rs), is[r->lhs].name); + for (s1=r->rhs; *s1!=S; s1++) + fprintf(fgrm, " %s", is[*s1].name); + } + fprintf(fgrm, "\n"); + for (m=0; mts; t-st[m]->tsnt; t++) { + r = t->rule; + d = t->dot; + if (d==0 && t!=st[m]->ts) + continue; + fprintf(fgrm, " %s ->", is[r->lhs].name); + for (s1=r->rhs; *s1!=S; s1++, d--) + fprintf(fgrm, " %s%s", d ? "" : ". 
", is[*s1].name); + if (!d) + fprintf(fgrm, " ."); + fprintf(fgrm, "\n"); + } + fprintf(fgrm, "\n"); + ar = &as[m]; + for (n=0; nt[n]; + if (!act) + continue; + if (act==-1) + fprintf(fgrm, " %s error (nonassoc)\n", is[n].name); + else if (act<0) + fprintf(fgrm, " %s reduce with rule %d\n", is[n].name, Red(act)); + else + fprintf(fgrm, " %s shift and go to %d\n", is[n].name, act-1); + } + if (ar->def != -1) + fprintf(fgrm, " * reduce with rule %d\n", ar->def); + } +} + +enum { + TIdnt, + TTokchr, /* 'c' */ + TPP, /* %% */ + TLL, /* %{ */ + TLangle, /* < */ + TRangle, /* > */ + TSemi, /* ; */ + TBar, /* | */ + TColon, /* : */ + TLBrack, /* { */ + TUnion, + TType, + TToken, + TRight, + TLeft, + TNonassoc, + TPrec, + TStart, + TEof +}; + +struct { + char *name; + int tok; +} words[] = { + { "%%", TPP }, + { "%union", TUnion }, + { "%type", TType }, + { "%token", TToken }, + { "%right", TRight }, + { "%left", TLeft }, + { "%nonassoc", TNonassoc }, + { "%prec", TPrec }, + { "%start", TStart }, + { 0, 0 } +}; + +char idnt[IdntSz]; + +int +istok(int c) +{ + return isalnum(c) || c=='_' || c=='%'; +} + +int +nexttk() +{ + int n; + char c, *p; + + while (isspace(c=fgetc(fin))) + if (c == '\n') + lineno++; + switch (c) { + case '<': + return TLangle; + case '>': + return TRangle; + case ';': + return TSemi; + case '|': + return TBar; + case ':': + return TColon; + case '{': + return TLBrack; + case EOF: + return TEof; + case '\'': + idnt[0] = '\''; + idnt[1] = fgetc(fin); + idnt[2] = '\''; + idnt[3] = 0; + if (fgetc(fin)!='\'') + die("syntax error, invalid char token"); + return TTokchr; + } + p = idnt; + while (istok(c)) { + *p++ = c; + if (p-idnt >= IdntSz-1) + die("identifier too long"); + c = fgetc(fin); + } + *p = 0; + if (strcmp(idnt, "%")==0) + if (c=='{') + return TLL; + ungetc(c, fin); + for (n=0; words[n].name; n++) + if (strcmp(idnt, words[n].name) == 0) + return words[n].tok; + return TIdnt; +} + +char * +cpycode() +{ + int c, nest, in, len, pos; + char *s; + + 
len = 64; + s = yalloc(len+1, 1); + s[0] = '{'; + pos = 1; + nest = 1; + in = 0; + + while (nest) { + c = fgetc(fin); + if (in) { + if (c == in) + if (s[pos-1] != '\\') + in = 0; + } else { + if (c == '"' || c == '\'') + in = c; + if (c == '{') + nest++; + if (c == '}') + nest--; + if (c == EOF) + die("syntax error, unclosed code block"); + if (c == '\n') + lineno++; + } + if (pos>=len) + if (!(s=realloc(s, len=2*len+1))) + die("out of memory"); + s[pos++] = c; + } + s[pos] = 0; + return s; +} + +int +gettype(char *type) +{ + int tk; + + tk = nexttk(); + if (tk==TLangle) { + if (nexttk()!=TIdnt) + die("syntax error, ident expected after <"); + strcpy(type, idnt); + if (nexttk()!=TRangle) + die("syntax error, unclosed <"); + return nexttk(); + } else { + type[0] = 0; + return tk; + } +} + +Sym +findsy(char *name, int add) +{ + int n; + + for (n=0; n=MaxTk) + die("too many tokens"); + ntk++; + strcpy(is[n].name, name); + return n; + } + n = MaxTk; + } + if (strcmp(is[n].name, name)==0) + return n; + } + if (add) { + if (nsy>=MaxTk+MaxNt) + die("too many non-terminals"); + strcpy(is[nsy].name, name); + return nsy++; + } else + return nsy; +} + +void +getdecls() +{ + int tk, prec, p, a, c, c1, n; + Info *si; + char type[IdntSz], *s; + + strcpy(is[0].name, "$"); + ntk = 1; + strcpy(is[Sym0].name, "@start"); + nsy = MaxTk+1; + sstart = S; + prec = 0; + tk = nexttk(); + for (;;) + switch (tk) { + case TStart: + tk = nexttk(); + if (tk!=TIdnt) + die("syntax error, ident expected after %start"); + sstart = findsy(idnt, 1); + if (sstart=MaxTk && n=MaxTk) + die("too many tokens"); + n = ntk++; + } + si = &is[n]; + strcpy(si->name, idnt); + strcpy(si->type, type); + si->prec = p; + si->assoc = a; + tk = nexttk(); + } + break; + case TType: + tk = gettype(type); + if (type[0]==0) + die("syntax error, type expected"); + while (tk==TIdnt) { + si = 0; + n = findsy(idnt, 1); + if (nname, idnt); + strcpy(si->type, type); + tk = nexttk(); + } + break; + case TLL: + fprintf(fout, 
"#line %d \"%s\"\n", lineno, srca); + for (;;) { + c = fgetc(fin); + if (c == EOF) + die("syntax error, unclosed %{"); + if (c == '%') { + c1 = fgetc(fin); + if (c1 == '}') { + fputs("\n", fout); + break; + } + ungetc(c1, fin); + } + if (c == '\n') + lineno++; + fputc(c, fout); + } + tk = nexttk(); + break; + case TPP: + return; + case TEof: + die("syntax error, unfinished declarations"); + default: + die("syntax error, declaration expected"); + } +} + +void +getgram() +{ + extern char *retcode; + int tk; + Sym hd, *p, s; + Rule *r; + + for (;;) { + tk = nexttk(); + if (tk==TPP || tk==TEof) { + if (sstart==S) + die("syntax error, empty grammar"); + r = &rs[nrl++]; + r->lhs = Sym0; + r->rhs[0] = sstart; + r->rhs[1] = 0; + r->rhs[2] = S; + r->act = retcode; + qsort(rs, nrl, sizeof rs[0], rcmp); + return; + } + if (tk!=TIdnt) + die("syntax error, production rule expected"); + if (nexttk()!=TColon) + die("syntax error, colon expected after production's head"); + hd = findsy(idnt, 1); + if (sstart==S) + sstart = hd; + do { + if (nrl>=MaxRl-1) + die("too many rules"); + r = &rs[nrl++]; + r->lhs = hd; + r->act = 0; + p = r->rhs; + while ((tk=nexttk())==TIdnt || tk==TTokchr || tk==TPrec) { + if (tk==TPrec) { + tk = nexttk(); + if (tk!=TIdnt + || (s=findsy(idnt, 0))>=ntk) + die("token expected after %prec"); + r->prec = is[s].prec; + continue; + } + s = findsy(idnt, 1); + *p++ = s; + if (s0) + r->prec = is[s].prec; + if (p-r->rhs >= MaxRhs-1) + die("production rule too long"); + } + *p = S; + if (tk==TLBrack) { + r->actln = lineno; + r->act = cpycode(); + tk = nexttk(); + } + } while (tk==TBar); + if (tk!=TSemi) + die("syntax error, ; or | expected"); + } +} + +void +actout(Rule *r) +{ + long l; + int i, ar; + char c, *p, *ty, tya[IdntSz]; + + ar = slen(r->rhs); + p = r->act; + i = r->actln; + if (!p) + return; + while ((c=*p++)) + switch (c) { + case '\n': + i++; + default: + fputc(c, fout); + break; + case '$': + c = *p++; + if (c == '$') { + fprintf(fout, "yyval"); + if 
(doty) { + ty = is[r->lhs].type; + if (!ty[0]) { + lineno = i; + die("$$ has no type"); + } + fprintf(fout, ".%s", ty); + } + } + else if (c == '<') { + ty = tya; + while (istok(*p) && ty-tya ar) { + lineno = i; + die("invalid $n"); + } + fprintf(fout, "ps[%d].val", (int)l); + if (doty) { + if (!ty && l>0) + ty = is[r->rhs[l-1]].type; + if (!ty || !ty[0]) { + lineno = i; + die("$n has no type"); + } + fprintf(fout, ".%s", ty); + } + } + else { + fputc('$', fout); + fputc(c, fout); + } + } + fputs("\n", fout); +} + +void +codeout() +{ + extern char *code0[], *code1[]; + char **p; + Rule *r; + int n, c; + + for (p=code0; *p; p++) + fputs(*p, fout); + for (n=0; nactln, srca); + actout(r); + fputs("\t\tbreak;\n", fout); + } + for (p=code1; *p; p++) + fputs(*p, fout); + fprintf(fout, "#line %d \"%s\"\n", lineno, srca); + while ((c=fgetc(fin))!=EOF) + fputc(c, fout); +} + +void +init(int ac, char *av[]) +{ + int c, vf, df; + char *pref, buf[100], *opt; + + (void) ac; + pref = "y"; + vf = df = 0; + for (av++; av[0] && av[0][0]=='-'; av++) + for (opt = &av[0][1]; (c = *opt); opt++) + switch (c) { + case 'v': + vf = 1; + break; + case 'd': + df = 1; + break; + case 'b': + if ((pref = *++av)) + break; + default: + usage: + fputs("usage: myacc [-vd] [-b file_prefix] grammar\n", stderr); + exit(1); + } + + if (!(srca = *av)) + goto usage; + fin = fopen(srca, "r"); + if (strlen(pref) + 10 > sizeof buf) + die("-b prefix too long"); + sprintf(buf, "%s.tab.c", pref); + fout = fopen(buf, "w"); + if (vf) { + sprintf(buf, "%s.output", pref); + fgrm = fopen(buf, "w"); + } + if (df) { + sprintf(buf, "%s.tab.h", pref); + fhdr = fopen(buf, "w"); + if (fhdr) { + fprintf(fhdr, "#ifndef Y_TAB_H_\n"); + fprintf(fhdr, "#define Y_TAB_H_\n"); + } + } + if (!fin || !fout || (!fgrm && vf) || (!fhdr && df)) + die("cannot open work files"); +} + +int +main(int ac, char *av[]) +{ + + init(ac, av); + getdecls(); + getgram(); + ginit(); + stgen(); + tblgen(); + stdump(); + actgen(); + tblout(); + 
codeout(); + + if (srconf) + fprintf(stderr, "%d shift/reduce conflicts\n", srconf); + if (rrconf) + fprintf(stderr, "%d reduce/reduce conflicts\n", rrconf); + + exit(0); +} + +/* Glorious macros. + |sed 's|.*|"&\\n",|' +*/ + +char *retcode = "\t\tyyval = ps[1].val; return 0;"; + +char *code0[] = { +"\n", +"#ifndef YYSTYPE\n", +"#define YYSTYPE int\n", +"#endif\n", +"YYSTYPE yylval;\n", +"\n", +"int\n", +"yyparse()\n", +"{\n", +" enum {\n", +" StackSize = 100,\n", +" ActSz = sizeof yyact / sizeof yyact[0],\n", +" };\n", +" struct {\n", +" YYSTYPE val;\n", +" int state;\n", +" } stk[StackSize], *ps;\n", +" int r, h, n, s, tk;\n", +" YYSTYPE yyval;\n", +"\n", +" ps = stk;\n", +" ps->state = s = yyini;\n", +" tk = -1;\n", +"loop:\n", +" n = yyadsp[s];\n", +" if (tk < 0 && n > -yyntoks)\n", +" tk = yytrns[yylex()];\n", +" n += tk;\n", +" if (n < 0 || n >= ActSz || yychk[n] != tk) {\n", +" r = yyadef[s];\n", +" if (r < 0)\n", +" return -1;\n", +" goto reduce;\n", +" }\n", +" n = yyact[n];\n", +" if (n == -1)\n", +" return -1;\n", +" if (n < 0) {\n", +" r = - (n+2);\n", +" goto reduce;\n", +" }\n", +" tk = -1;\n", +" yyval = yylval;\n", +"stack:\n", +" ps++;\n", +" if (ps-stk >= StackSize)\n", +" return -2;\n", +" s = n;\n", +" ps->state = s;\n", +" ps->val = yyval;\n", +" goto loop;\n", +"reduce:\n", +" ps -= yyr1[r];\n", +" h = yyr2[r];\n", +" s = ps->state;\n", +" n = yygdsp[h] + s;\n", +" if (n < 0 || n >= ActSz || yychk[n] != yyntoks+h)\n", +" n = yygdef[h];\n", +" else\n", +" n = yyact[n];\n", +" switch (r) {\n", +0 +}; + +char *code1[] = { +" }\n", +" goto stack;\n", +"}\n", +0 +}; diff --git a/src/qbe/ops.h b/src/qbe/ops.h new file mode 100644 index 00000000..f59e7578 --- /dev/null +++ b/src/qbe/ops.h @@ -0,0 +1,228 @@ +#ifndef X /* amd64 */ + #define X(NMemArgs, SetsZeroFlag, LeavesFlags) +#endif + +#ifndef V /* riscv64 */ + #define V(Imm) +#endif + +#ifndef F +#define F(a,b,c,d,e,f,g,h,i,j) +#endif + +#define T(a,b,c,d,e,f,g,h) { \ + {[Kw]=K##a, [Kl]=K##b, 
[Ks]=K##c, [Kd]=K##d}, \ + {[Kw]=K##e, [Kl]=K##f, [Ks]=K##g, [Kd]=K##h} \ +} + +/*********************/ +/* PUBLIC OPERATIONS */ +/*********************/ + +/* can fold */ +/* | has identity */ +/* | | identity value for arg[1] */ +/* | | | commutative */ +/* | | | | associative */ +/* | | | | | idempotent */ +/* | | | | | | c{eq,ne}[wl] */ +/* | | | | | | | c[us][gl][et][wl] */ +/* | | | | | | | | value if = args */ +/* | | | | | | | | | pinned */ +/* Arithmetic and Bits v v v v v v v v v v */ +O(add, T(w,l,s,d, w,l,s,d), F(1,1,0,1,1,0,0,0,0,0)) X(2,1,0) V(1) +O(sub, T(w,l,s,d, w,l,s,d), F(1,1,0,0,0,0,0,0,0,0)) X(2,1,0) V(0) +O(neg, T(w,l,s,d, x,x,x,x), F(1,0,0,0,0,0,0,0,0,0)) X(1,1,0) V(0) +O(div, T(w,l,s,d, w,l,s,d), F(1,1,1,0,0,0,0,0,0,0)) X(0,0,0) V(0) +O(rem, T(w,l,e,e, w,l,e,e), F(1,0,0,0,0,0,0,0,0,0)) X(0,0,0) V(0) +O(udiv, T(w,l,e,e, w,l,e,e), F(1,1,1,0,0,0,0,0,0,0)) X(0,0,0) V(0) +O(urem, T(w,l,e,e, w,l,e,e), F(1,0,0,0,0,0,0,0,0,0)) X(0,0,0) V(0) +O(mul, T(w,l,s,d, w,l,s,d), F(1,1,1,1,0,0,0,0,0,0)) X(2,0,0) V(0) +O(and, T(w,l,e,e, w,l,e,e), F(1,0,0,1,1,1,0,0,0,0)) X(2,1,0) V(1) +O(or, T(w,l,e,e, w,l,e,e), F(1,1,0,1,1,1,0,0,0,0)) X(2,1,0) V(1) +O(xor, T(w,l,e,e, w,l,e,e), F(1,1,0,1,1,0,0,0,0,0)) X(2,1,0) V(1) +O(sar, T(w,l,e,e, w,w,e,e), F(1,1,0,0,0,0,0,0,0,0)) X(1,1,0) V(1) +O(shr, T(w,l,e,e, w,w,e,e), F(1,1,0,0,0,0,0,0,0,0)) X(1,1,0) V(1) +O(shl, T(w,l,e,e, w,w,e,e), F(1,1,0,0,0,0,0,0,0,0)) X(1,1,0) V(1) + +/* Comparisons */ +O(ceqw, T(w,w,e,e, w,w,e,e), F(1,1,1,1,0,0,1,0,1,0)) X(0,1,0) V(0) +O(cnew, T(w,w,e,e, w,w,e,e), F(1,1,0,1,0,0,1,0,0,0)) X(0,1,0) V(0) +O(csgew, T(w,w,e,e, w,w,e,e), F(1,0,0,0,0,0,0,1,1,0)) X(0,1,0) V(0) +O(csgtw, T(w,w,e,e, w,w,e,e), F(1,0,0,0,0,0,0,1,0,0)) X(0,1,0) V(0) +O(cslew, T(w,w,e,e, w,w,e,e), F(1,0,0,0,0,0,0,1,1,0)) X(0,1,0) V(0) +O(csltw, T(w,w,e,e, w,w,e,e), F(1,0,0,0,0,0,0,1,0,0)) X(0,1,0) V(1) +O(cugew, T(w,w,e,e, w,w,e,e), F(1,0,0,0,0,0,0,1,1,0)) X(0,1,0) V(0) +O(cugtw, T(w,w,e,e, w,w,e,e), F(1,0,0,0,0,0,0,1,0,0)) 
X(0,1,0) V(0) +O(culew, T(w,w,e,e, w,w,e,e), F(1,0,0,0,0,0,0,1,1,0)) X(0,1,0) V(0) +O(cultw, T(w,w,e,e, w,w,e,e), F(1,0,0,0,0,0,0,1,0,0)) X(0,1,0) V(1) + +O(ceql, T(l,l,e,e, l,l,e,e), F(1,0,0,1,0,0,1,0,1,0)) X(0,1,0) V(0) +O(cnel, T(l,l,e,e, l,l,e,e), F(1,0,0,1,0,0,1,0,0,0)) X(0,1,0) V(0) +O(csgel, T(l,l,e,e, l,l,e,e), F(1,0,0,0,0,0,0,1,1,0)) X(0,1,0) V(0) +O(csgtl, T(l,l,e,e, l,l,e,e), F(1,0,0,0,0,0,0,1,0,0)) X(0,1,0) V(0) +O(cslel, T(l,l,e,e, l,l,e,e), F(1,0,0,0,0,0,0,1,1,0)) X(0,1,0) V(0) +O(csltl, T(l,l,e,e, l,l,e,e), F(1,0,0,0,0,0,0,1,0,0)) X(0,1,0) V(1) +O(cugel, T(l,l,e,e, l,l,e,e), F(1,0,0,0,0,0,0,1,1,0)) X(0,1,0) V(0) +O(cugtl, T(l,l,e,e, l,l,e,e), F(1,0,0,0,0,0,0,1,0,0)) X(0,1,0) V(0) +O(culel, T(l,l,e,e, l,l,e,e), F(1,0,0,0,0,0,0,1,1,0)) X(0,1,0) V(0) +O(cultl, T(l,l,e,e, l,l,e,e), F(1,0,0,0,0,0,0,1,0,0)) X(0,1,0) V(1) + +O(ceqs, T(s,s,e,e, s,s,e,e), F(1,0,0,1,0,0,0,0,0,0)) X(0,1,0) V(0) +O(cges, T(s,s,e,e, s,s,e,e), F(1,0,0,0,0,0,0,0,0,0)) X(0,1,0) V(0) +O(cgts, T(s,s,e,e, s,s,e,e), F(1,0,0,0,0,0,0,0,0,0)) X(0,1,0) V(0) +O(cles, T(s,s,e,e, s,s,e,e), F(1,0,0,0,0,0,0,0,0,0)) X(0,1,0) V(0) +O(clts, T(s,s,e,e, s,s,e,e), F(1,0,0,0,0,0,0,0,0,0)) X(0,1,0) V(0) +O(cnes, T(s,s,e,e, s,s,e,e), F(1,0,0,1,0,0,0,0,0,0)) X(0,1,0) V(0) +O(cos, T(s,s,e,e, s,s,e,e), F(1,0,0,1,0,0,0,0,0,0)) X(0,1,0) V(0) +O(cuos, T(s,s,e,e, s,s,e,e), F(1,0,0,1,0,0,0,0,0,0)) X(0,1,0) V(0) + +O(ceqd, T(d,d,e,e, d,d,e,e), F(1,0,0,1,0,0,0,0,0,0)) X(0,1,0) V(0) +O(cged, T(d,d,e,e, d,d,e,e), F(1,0,0,0,0,0,0,0,0,0)) X(0,1,0) V(0) +O(cgtd, T(d,d,e,e, d,d,e,e), F(1,0,0,0,0,0,0,0,0,0)) X(0,1,0) V(0) +O(cled, T(d,d,e,e, d,d,e,e), F(1,0,0,0,0,0,0,0,0,0)) X(0,1,0) V(0) +O(cltd, T(d,d,e,e, d,d,e,e), F(1,0,0,0,0,0,0,0,0,0)) X(0,1,0) V(0) +O(cned, T(d,d,e,e, d,d,e,e), F(1,0,0,1,0,0,0,0,0,0)) X(0,1,0) V(0) +O(cod, T(d,d,e,e, d,d,e,e), F(1,0,0,1,0,0,0,0,0,0)) X(0,1,0) V(0) +O(cuod, T(d,d,e,e, d,d,e,e), F(1,0,0,1,0,0,0,0,0,0)) X(0,1,0) V(0) + +/* Memory */ +O(storeb, T(w,e,e,e, m,e,e,e), 
F(0,0,0,0,0,0,0,0,0,1)) X(0,0,1) V(0) +O(storeh, T(w,e,e,e, m,e,e,e), F(0,0,0,0,0,0,0,0,0,1)) X(0,0,1) V(0) +O(storew, T(w,e,e,e, m,e,e,e), F(0,0,0,0,0,0,0,0,0,1)) X(0,0,1) V(0) +O(storel, T(l,e,e,e, m,e,e,e), F(0,0,0,0,0,0,0,0,0,1)) X(0,0,1) V(0) +O(stores, T(s,e,e,e, m,e,e,e), F(0,0,0,0,0,0,0,0,0,1)) X(0,0,1) V(0) +O(stored, T(d,e,e,e, m,e,e,e), F(0,0,0,0,0,0,0,0,0,1)) X(0,0,1) V(0) + +O(loadsb, T(m,m,e,e, x,x,e,e), F(0,0,0,0,0,0,0,0,0,1)) X(0,0,1) V(0) +O(loadub, T(m,m,e,e, x,x,e,e), F(0,0,0,0,0,0,0,0,0,1)) X(0,0,1) V(0) +O(loadsh, T(m,m,e,e, x,x,e,e), F(0,0,0,0,0,0,0,0,0,1)) X(0,0,1) V(0) +O(loaduh, T(m,m,e,e, x,x,e,e), F(0,0,0,0,0,0,0,0,0,1)) X(0,0,1) V(0) +O(loadsw, T(m,m,e,e, x,x,e,e), F(0,0,0,0,0,0,0,0,0,1)) X(0,0,1) V(0) +O(loaduw, T(m,m,e,e, x,x,e,e), F(0,0,0,0,0,0,0,0,0,1)) X(0,0,1) V(0) +O(load, T(m,m,m,m, x,x,x,x), F(0,0,0,0,0,0,0,0,0,1)) X(0,0,1) V(0) + +/* Extensions and Truncations */ +O(extsb, T(w,w,e,e, x,x,e,e), F(1,0,0,0,0,0,0,0,0,0)) X(0,0,1) V(0) +O(extub, T(w,w,e,e, x,x,e,e), F(1,0,0,0,0,0,0,0,0,0)) X(0,0,1) V(0) +O(extsh, T(w,w,e,e, x,x,e,e), F(1,0,0,0,0,0,0,0,0,0)) X(0,0,1) V(0) +O(extuh, T(w,w,e,e, x,x,e,e), F(1,0,0,0,0,0,0,0,0,0)) X(0,0,1) V(0) +O(extsw, T(e,w,e,e, e,x,e,e), F(1,0,0,0,0,0,0,0,0,0)) X(0,0,1) V(0) +O(extuw, T(e,w,e,e, e,x,e,e), F(1,0,0,0,0,0,0,0,0,0)) X(0,0,1) V(0) + +O(exts, T(e,e,e,s, e,e,e,x), F(1,0,0,0,0,0,0,0,0,0)) X(0,0,1) V(0) +O(truncd, T(e,e,d,e, e,e,x,e), F(1,0,0,0,0,0,0,0,0,0)) X(0,0,1) V(0) +O(stosi, T(s,s,e,e, x,x,e,e), F(1,0,0,0,0,0,0,0,0,0)) X(0,0,1) V(0) +O(stoui, T(s,s,e,e, x,x,e,e), F(1,0,0,0,0,0,0,0,0,0)) X(0,0,1) V(0) +O(dtosi, T(d,d,e,e, x,x,e,e), F(1,0,0,0,0,0,0,0,0,0)) X(0,0,1) V(0) +O(dtoui, T(d,d,e,e, x,x,e,e), F(1,0,0,0,0,0,0,0,0,0)) X(0,0,1) V(0) +O(swtof, T(e,e,w,w, e,e,x,x), F(1,0,0,0,0,0,0,0,0,0)) X(0,0,1) V(0) +O(uwtof, T(e,e,w,w, e,e,x,x), F(1,0,0,0,0,0,0,0,0,0)) X(0,0,1) V(0) +O(sltof, T(e,e,l,l, e,e,x,x), F(1,0,0,0,0,0,0,0,0,0)) X(0,0,1) V(0) +O(ultof, T(e,e,l,l, e,e,x,x), 
F(1,0,0,0,0,0,0,0,0,0)) X(0,0,1) V(0) +O(cast, T(s,d,w,l, x,x,x,x), F(1,0,0,0,0,0,0,0,0,0)) X(0,0,1) V(0) + +/* Stack Allocation */ +O(alloc4, T(e,l,e,e, e,x,e,e), F(0,0,0,0,0,0,0,0,0,1)) X(0,0,0) V(0) +O(alloc8, T(e,l,e,e, e,x,e,e), F(0,0,0,0,0,0,0,0,0,1)) X(0,0,0) V(0) +O(alloc16, T(e,l,e,e, e,x,e,e), F(0,0,0,0,0,0,0,0,0,1)) X(0,0,0) V(0) + +/* Variadic Function Helpers */ +O(vaarg, T(m,m,m,m, x,x,x,x), F(0,0,0,0,0,0,0,0,0,1)) X(0,0,0) V(0) +O(vastart, T(m,e,e,e, x,e,e,e), F(0,0,0,0,0,0,0,0,0,1)) X(0,0,0) V(0) + +O(copy, T(w,l,s,d, x,x,x,x), F(0,0,0,0,0,0,0,0,0,0)) X(0,0,1) V(0) + +/* Debug */ +O(dbgloc, T(w,e,e,e, w,e,e,e), F(0,0,0,0,0,0,0,0,0,1)) X(0,0,1) V(0) + +/****************************************/ +/* INTERNAL OPERATIONS (keep nop first) */ +/****************************************/ + +/* Miscellaneous and Architecture-Specific Operations */ +O(nop, T(x,x,x,x, x,x,x,x), F(0,0,0,0,0,0,0,0,0,0)) X(0,0,1) V(0) +O(addr, T(m,m,e,e, x,x,e,e), F(0,0,0,0,0,0,0,0,0,0)) X(0,0,1) V(0) +O(blit0, T(m,e,e,e, m,e,e,e), F(0,0,0,0,0,0,0,0,0,1)) X(0,1,0) V(0) +O(blit1, T(w,e,e,e, x,e,e,e), F(0,0,0,0,0,0,0,0,0,1)) X(0,1,0) V(0) +O(sel0, T(w,e,e,e, x,e,e,e), F(0,0,0,0,0,0,0,0,0,1)) X(0,0,0) V(0) +O(sel1, T(w,l,e,e, w,l,e,e), F(0,0,0,0,0,0,0,0,0,1)) X(0,0,0) V(0) +O(swap, T(w,l,s,d, w,l,s,d), F(0,0,0,0,0,0,0,0,0,0)) X(1,0,0) V(0) +O(sign, T(w,l,e,e, x,x,e,e), F(0,0,0,0,0,0,0,0,0,0)) X(0,0,0) V(0) +O(salloc, T(e,l,e,e, e,x,e,e), F(0,0,0,0,0,0,0,0,0,0)) X(0,0,0) V(0) +O(xidiv, T(w,l,e,e, x,x,e,e), F(0,0,0,0,0,0,0,0,0,0)) X(1,0,0) V(0) +O(xdiv, T(w,l,e,e, x,x,e,e), F(0,0,0,0,0,0,0,0,0,0)) X(1,0,0) V(0) +O(xcmp, T(w,l,s,d, w,l,s,d), F(0,0,0,0,0,0,0,0,0,0)) X(1,1,0) V(0) +O(xtest, T(w,l,e,e, w,l,e,e), F(0,0,0,0,0,0,0,0,0,0)) X(1,1,0) V(0) +O(acmp, T(w,l,e,e, w,l,e,e), F(0,0,0,0,0,0,0,0,0,0)) X(0,0,0) V(0) +O(acmn, T(w,l,e,e, w,l,e,e), F(0,0,0,0,0,0,0,0,0,0)) X(0,0,0) V(0) +O(afcmp, T(e,e,s,d, e,e,s,d), F(0,0,0,0,0,0,0,0,0,0)) X(0,0,0) V(0) +O(reqz, T(w,l,e,e, x,x,e,e), 
F(0,0,0,0,0,0,0,0,0,0)) X(0,0,0) V(0) +O(rnez, T(w,l,e,e, x,x,e,e), F(0,0,0,0,0,0,0,0,0,0)) X(0,0,0) V(0) + +/* Arguments, Parameters, and Calls */ +O(par, T(x,x,x,x, x,x,x,x), F(0,0,0,0,0,0,0,0,0,1)) X(0,0,0) V(0) +O(parsb, T(x,x,x,x, x,x,x,x), F(0,0,0,0,0,0,0,0,0,1)) X(0,0,0) V(0) +O(parub, T(x,x,x,x, x,x,x,x), F(0,0,0,0,0,0,0,0,0,1)) X(0,0,0) V(0) +O(parsh, T(x,x,x,x, x,x,x,x), F(0,0,0,0,0,0,0,0,0,1)) X(0,0,0) V(0) +O(paruh, T(x,x,x,x, x,x,x,x), F(0,0,0,0,0,0,0,0,0,1)) X(0,0,0) V(0) +O(parc, T(e,x,e,e, e,x,e,e), F(0,0,0,0,0,0,0,0,0,1)) X(0,0,0) V(0) +O(pare, T(e,x,e,e, e,x,e,e), F(0,0,0,0,0,0,0,0,0,1)) X(0,0,0) V(0) +O(arg, T(w,l,s,d, x,x,x,x), F(0,0,0,0,0,0,0,0,0,1)) X(0,0,0) V(0) +O(argsb, T(w,e,e,e, x,x,x,x), F(0,0,0,0,0,0,0,0,0,1)) X(0,0,0) V(0) +O(argub, T(w,e,e,e, x,x,x,x), F(0,0,0,0,0,0,0,0,0,1)) X(0,0,0) V(0) +O(argsh, T(w,e,e,e, x,x,x,x), F(0,0,0,0,0,0,0,0,0,1)) X(0,0,0) V(0) +O(arguh, T(w,e,e,e, x,x,x,x), F(0,0,0,0,0,0,0,0,0,1)) X(0,0,0) V(0) +O(argc, T(e,x,e,e, e,l,e,e), F(0,0,0,0,0,0,0,0,0,1)) X(0,0,0) V(0) +O(arge, T(e,l,e,e, e,x,e,e), F(0,0,0,0,0,0,0,0,0,1)) X(0,0,0) V(0) +O(argv, T(x,x,x,x, x,x,x,x), F(0,0,0,0,0,0,0,0,0,1)) X(0,0,0) V(0) +O(call, T(m,m,m,m, x,x,x,x), F(0,0,0,0,0,0,0,0,0,1)) X(0,0,0) V(0) + +/* Flags Setting */ +O(flagieq, T(x,x,e,e, x,x,e,e), F(0,0,0,0,0,0,0,0,0,0)) X(0,0,1) V(0) +O(flagine, T(x,x,e,e, x,x,e,e), F(0,0,0,0,0,0,0,0,0,0)) X(0,0,1) V(0) +O(flagisge, T(x,x,e,e, x,x,e,e), F(0,0,0,0,0,0,0,0,0,0)) X(0,0,1) V(0) +O(flagisgt, T(x,x,e,e, x,x,e,e), F(0,0,0,0,0,0,0,0,0,0)) X(0,0,1) V(0) +O(flagisle, T(x,x,e,e, x,x,e,e), F(0,0,0,0,0,0,0,0,0,0)) X(0,0,1) V(0) +O(flagislt, T(x,x,e,e, x,x,e,e), F(0,0,0,0,0,0,0,0,0,0)) X(0,0,1) V(0) +O(flagiuge, T(x,x,e,e, x,x,e,e), F(0,0,0,0,0,0,0,0,0,0)) X(0,0,1) V(0) +O(flagiugt, T(x,x,e,e, x,x,e,e), F(0,0,0,0,0,0,0,0,0,0)) X(0,0,1) V(0) +O(flagiule, T(x,x,e,e, x,x,e,e), F(0,0,0,0,0,0,0,0,0,0)) X(0,0,1) V(0) +O(flagiult, T(x,x,e,e, x,x,e,e), F(0,0,0,0,0,0,0,0,0,0)) X(0,0,1) V(0) +O(flagfeq, 
T(x,x,e,e, x,x,e,e), F(0,0,0,0,0,0,0,0,0,0)) X(0,0,1) V(0) +O(flagfge, T(x,x,e,e, x,x,e,e), F(0,0,0,0,0,0,0,0,0,0)) X(0,0,1) V(0) +O(flagfgt, T(x,x,e,e, x,x,e,e), F(0,0,0,0,0,0,0,0,0,0)) X(0,0,1) V(0) +O(flagfle, T(x,x,e,e, x,x,e,e), F(0,0,0,0,0,0,0,0,0,0)) X(0,0,1) V(0) +O(flagflt, T(x,x,e,e, x,x,e,e), F(0,0,0,0,0,0,0,0,0,0)) X(0,0,1) V(0) +O(flagfne, T(x,x,e,e, x,x,e,e), F(0,0,0,0,0,0,0,0,0,0)) X(0,0,1) V(0) +O(flagfo, T(x,x,e,e, x,x,e,e), F(0,0,0,0,0,0,0,0,0,0)) X(0,0,1) V(0) +O(flagfuo, T(x,x,e,e, x,x,e,e), F(0,0,0,0,0,0,0,0,0,0)) X(0,0,1) V(0) + +/* Backend Flag Select (Condition Move) */ +O(xselieq, T(w,l,e,e, w,l,e,e), F(0,0,0,0,0,0,0,0,0,0)) X(0,0,0) V(0) +O(xseline, T(w,l,e,e, w,l,e,e), F(0,0,0,0,0,0,0,0,0,0)) X(0,0,0) V(0) +O(xselisge, T(w,l,e,e, w,l,e,e), F(0,0,0,0,0,0,0,0,0,0)) X(0,0,0) V(0) +O(xselisgt, T(w,l,e,e, w,l,e,e), F(0,0,0,0,0,0,0,0,0,0)) X(0,0,0) V(0) +O(xselisle, T(w,l,e,e, w,l,e,e), F(0,0,0,0,0,0,0,0,0,0)) X(0,0,0) V(0) +O(xselislt, T(w,l,e,e, w,l,e,e), F(0,0,0,0,0,0,0,0,0,0)) X(0,0,0) V(0) +O(xseliuge, T(w,l,e,e, w,l,e,e), F(0,0,0,0,0,0,0,0,0,0)) X(0,0,0) V(0) +O(xseliugt, T(w,l,e,e, w,l,e,e), F(0,0,0,0,0,0,0,0,0,0)) X(0,0,0) V(0) +O(xseliule, T(w,l,e,e, w,l,e,e), F(0,0,0,0,0,0,0,0,0,0)) X(0,0,0) V(0) +O(xseliult, T(w,l,e,e, w,l,e,e), F(0,0,0,0,0,0,0,0,0,0)) X(0,0,0) V(0) +O(xselfeq, T(e,e,s,d, e,e,s,d), F(0,0,0,0,0,0,0,0,0,0)) X(0,0,0) V(0) +O(xselfge, T(e,e,s,d, e,e,s,d), F(0,0,0,0,0,0,0,0,0,0)) X(0,0,0) V(0) +O(xselfgt, T(e,e,s,d, e,e,s,d), F(0,0,0,0,0,0,0,0,0,0)) X(0,0,0) V(0) +O(xselfle, T(e,e,s,d, e,e,s,d), F(0,0,0,0,0,0,0,0,0,0)) X(0,0,0) V(0) +O(xselflt, T(e,e,s,d, e,e,s,d), F(0,0,0,0,0,0,0,0,0,0)) X(0,0,0) V(0) +O(xselfne, T(e,e,s,d, e,e,s,d), F(0,0,0,0,0,0,0,0,0,0)) X(0,0,0) V(0) +O(xselfo, T(e,e,s,d, e,e,s,d), F(0,0,0,0,0,0,0,0,0,0)) X(0,0,0) V(0) +O(xselfuo, T(e,e,s,d, e,e,s,d), F(0,0,0,0,0,0,0,0,0,0)) X(0,0,0) V(0) + +#undef T +#undef X +#undef V +#undef O + +/* +| column -t -o ' ' +*/ diff --git a/src/qbe/out.s 
b/src/qbe/out.s new file mode 100644 index 00000000..a77420f3 --- /dev/null +++ b/src/qbe/out.s @@ -0,0 +1,43 @@ +.text +.balign 4 +_add: + hint #34 + stp x29, x30, [sp, -16]! + mov x29, sp + add w0, w0, w1 + ldp x29, x30, [sp], 16 + ret +/* end function add */ + +.text +.balign 4 +.globl _main +_main: + hint #34 + stp x29, x30, [sp, -16]! + mov x29, sp + mov w1, #1 + mov w0, #1 + bl _add + mov x1, #16 + sub sp, sp, x1 + mov x1, #0 + add x1, sp, x1 + str w0, [x1] + adrp x0, _fmt@page + add x0, x0, _fmt@pageoff + bl _printf + mov x0, #16 + add sp, sp, x0 + mov w0, #0 + ldp x29, x30, [sp], 16 + ret +/* end function main */ + +.data +.balign 8 +_fmt: + .ascii "One and one make %d!\n" + .byte 0 +/* end data */ + diff --git a/src/qbe/parse.c b/src/qbe/parse.c new file mode 100644 index 00000000..7ab3ea59 --- /dev/null +++ b/src/qbe/parse.c @@ -0,0 +1,1433 @@ +#include "all.h" +#include +#include + +enum { + Ksb = 4, /* matches Oarg/Opar/Jret */ + Kub, + Ksh, + Kuh, + Kc, + K0, + + Ke = -2, /* erroneous mode */ + Km = Kl, /* memory pointer */ +}; + +Op optab[NOp] = { +#undef F +#define F(cf, hi, id, co, as, im, ic, lg, cv, pn) \ + .canfold = cf, \ + .hasid = hi, .idval = id, \ + .commutes = co, .assoc = as, \ + .idemp = im, \ + .cmpeqwl = ic, .cmplgtewl = lg, .eqval = cv, \ + .pinned = pn +#define O(op, k, flags) [O##op]={.name = #op, .argcls = k, flags}, + #include "ops.h" +#undef F +}; + +typedef enum { + PXXX, + PLbl, + PPhi, + PIns, + PEnd, +} PState; + +enum Token { + Txxx = 0, + + /* aliases */ + Tloadw = NPubOp, + Tloadl, + Tloads, + Tloadd, + Talloc1, + Talloc2, + + Tblit, + Tcall, + Tenv, + Tphi, + Tjmp, + Tjnz, + Tret, + Thlt, + Texport, + Tthread, + Tcommon, + Tfunc, + Ttype, + Tdata, + Tsection, + Talign, + Tdbgfile, + Tl, + Tw, + Tsh, + Tuh, + Th, + Tsb, + Tub, + Tb, + Td, + Ts, + Tz, + + Tint, + Tflts, + Tfltd, + Ttmp, + Tlbl, + Tglo, + Ttyp, + Tstr, + + Tplus, + Teq, + Tcomma, + Tlparen, + Trparen, + Tlbrace, + Trbrace, + Tnl, + Tdots, + Teof, + + Ntok +}; 
+ +static char *kwmap[Ntok] = { + [Tloadw] = "loadw", + [Tloadl] = "loadl", + [Tloads] = "loads", + [Tloadd] = "loadd", + [Talloc1] = "alloc1", + [Talloc2] = "alloc2", + [Tblit] = "blit", + [Tcall] = "call", + [Tenv] = "env", + [Tphi] = "phi", + [Tjmp] = "jmp", + [Tjnz] = "jnz", + [Tret] = "ret", + [Thlt] = "hlt", + [Texport] = "export", + [Tthread] = "thread", + [Tcommon] = "common", + [Tfunc] = "function", + [Ttype] = "type", + [Tdata] = "data", + [Tsection] = "section", + [Talign] = "align", + [Tdbgfile] = "dbgfile", + [Tsb] = "sb", + [Tub] = "ub", + [Tsh] = "sh", + [Tuh] = "uh", + [Tb] = "b", + [Th] = "h", + [Tw] = "w", + [Tl] = "l", + [Ts] = "s", + [Td] = "d", + [Tz] = "z", + [Tdots] = "...", +}; + +enum { + NPred = 63, + + TMask = 16383, /* for temps hash */ + BMask = 8191, /* for blocks hash */ + + K = 11183273, /* found using tools/lexh.c */ + M = 23, +}; + +static uchar lexh[1 << (32-M)]; +static FILE *inf; +static char *inpath; +static int thead; +static struct { + char chr; + double fltd; + float flts; + int64_t num; + char *str; +} tokval; +static int lnum; + +static Fn *curf; +static int *tmph; +static int tmphcap; +static Phi **plink; +static Blk *curb; +static Blk **blink; +static Blk *blkh[BMask+1]; +static int nblk; +static int rcls; +static uint ntyp; + +void +err(char *s, ...) 
+{ + va_list ap; + + va_start(ap, s); + fprintf(stderr, "qbe:%s:%d: ", inpath, lnum); + vfprintf(stderr, s, ap); + fprintf(stderr, "\n"); + va_end(ap); + exit(1); +} + +static void +lexinit() +{ + static int done; + int i; + long h; + + if (done) + return; + for (i=0; i> M; + assert(lexh[h] == Txxx); + lexh[h] = i; + } + done = 1; +} + +static int64_t +getint() +{ + uint64_t n; + int c, m; + + n = 0; + c = fgetc(inf); + m = (c == '-'); + if (m) + c = fgetc(inf); + do { + n = 10*n + (c - '0'); + c = fgetc(inf); + } while ('0' <= c && c <= '9'); + ungetc(c, inf); + if (m) + n = 1 + ~n; + return *(int64_t *)&n; +} + +static int +lex() +{ + static char tok[NString]; + int c, i, esc; + int t; + + do + c = fgetc(inf); + while (isblank(c)); + t = Txxx; + tokval.chr = c; + switch (c) { + case EOF: + return Teof; + case ',': + return Tcomma; + case '(': + return Tlparen; + case ')': + return Trparen; + case '{': + return Tlbrace; + case '}': + return Trbrace; + case '=': + return Teq; + case '+': + return Tplus; + case 's': + if (fscanf(inf, "_%f", &tokval.flts) != 1) + break; + return Tflts; + case 'd': + if (fscanf(inf, "_%lf", &tokval.fltd) != 1) + break; + return Tfltd; + case '%': + t = Ttmp; + c = fgetc(inf); + goto Alpha; + case '@': + t = Tlbl; + c = fgetc(inf); + goto Alpha; + case '$': + t = Tglo; + if ((c = fgetc(inf)) == '"') + goto Quoted; + goto Alpha; + case ':': + t = Ttyp; + c = fgetc(inf); + goto Alpha; + case '#': + while ((c=fgetc(inf)) != '\n' && c != EOF) + ; + /* fall through */ + case '\n': + lnum++; + return Tnl; + } + if (isdigit(c) || c == '-') { + ungetc(c, inf); + tokval.num = getint(); + return Tint; + } + if (c == '"') { + t = Tstr; + Quoted: + tokval.str = vnew(2, 1, PFn); + tokval.str[0] = c; + esc = 0; + for (i=1;; i++) { + c = fgetc(inf); + if (c == EOF) + err("unterminated string"); + vgrow(&tokval.str, i+2); + tokval.str[i] = c; + if (c == '"' && !esc) { + tokval.str[i+1] = 0; + return t; + } + esc = (c == '\\' && !esc); + } + } +Alpha: 
+ if (!isalpha(c) && c != '.' && c != '_') + err("invalid character %c (%d)", c, c); + i = 0; + do { + if (i >= NString-1) + err("identifier too long"); + tok[i++] = c; + c = fgetc(inf); + } while (isalpha(c) || c == '$' || c == '.' || c == '_' || isdigit(c)); + tok[i] = 0; + ungetc(c, inf); + tokval.str = tok; + if (t != Txxx) { + return t; + } + t = lexh[hash(tok)*K >> M]; + if (t == Txxx || strcmp(kwmap[t], tok) != 0) { + err("unknown keyword %s", tok); + return Txxx; + } + return t; +} + +static int +peek() +{ + if (thead == Txxx) + thead = lex(); + return thead; +} + +static int +next() +{ + int t; + + t = peek(); + thead = Txxx; + return t; +} + +static int +nextnl() +{ + int t; + + while ((t = next()) == Tnl) + ; + return t; +} + +static void +expect(int t) +{ + static char *ttoa[] = { + [Tlbl] = "label", + [Tcomma] = ",", + [Teq] = "=", + [Tnl] = "newline", + [Tlparen] = "(", + [Trparen] = ")", + [Tlbrace] = "{", + [Trbrace] = "}", + [Teof] = 0, + }; + char buf[128], *s1, *s2; + int t1; + + t1 = next(); + if (t == t1) + return; + s1 = ttoa[t] ? ttoa[t] : "??"; + s2 = ttoa[t1] ? ttoa[t1] : "??"; + sprintf(buf, "%s expected, got %s instead", s1, s2); + err(buf); +} + +static Ref +tmpref(char *v) +{ + int t, i; + + if (tmphcap/2 <= curf->ntmp-Tmp0) { + free(tmph); + tmphcap = tmphcap ? 
tmphcap*2 : TMask+1; + tmph = emalloc(tmphcap * sizeof tmph[0]); + for (t=Tmp0; tntmp; t++) { + i = hash(curf->tmp[t].name) & (tmphcap-1); + for (; tmph[i]; i=(i+1) & (tmphcap-1)) + ; + tmph[i] = t; + } + } + i = hash(v) & (tmphcap-1); + for (; tmph[i]; i=(i+1) & (tmphcap-1)) { + t = tmph[i]; + if (strcmp(curf->tmp[t].name, v) == 0) + return TMP(t); + } + t = curf->ntmp; + tmph[i] = t; + newtmp(0, Kx, curf); + strcpy(curf->tmp[t].name, v); + return TMP(t); +} + +static Ref +parseref() +{ + Con c; + + memset(&c, 0, sizeof c); + switch (next()) { + default: + return R; + case Ttmp: + return tmpref(tokval.str); + case Tint: + c.type = CBits; + c.bits.i = tokval.num; + break; + case Tflts: + c.type = CBits; + c.bits.s = tokval.flts; + c.flt = 1; + break; + case Tfltd: + c.type = CBits; + c.bits.d = tokval.fltd; + c.flt = 2; + break; + case Tthread: + c.sym.type = SThr; + expect(Tglo); + /* fall through */ + case Tglo: + c.type = CAddr; + c.sym.id = intern(tokval.str); + break; + } + return newcon(&c, curf); +} + +static int +findtyp(int i) +{ + while (--i >= 0) + if (strcmp(tokval.str, typ[i].name) == 0) + return i; + err("undefined type :%s", tokval.str); +} + +static int +parsecls(int *tyn) +{ + switch (next()) { + default: + err("invalid class specifier"); + case Ttyp: + *tyn = findtyp(ntyp); + return Kc; + case Tsb: + return Ksb; + case Tub: + return Kub; + case Tsh: + return Ksh; + case Tuh: + return Kuh; + case Tw: + return Kw; + case Tl: + return Kl; + case Ts: + return Ks; + case Td: + return Kd; + } +} + +static int +parserefl(int arg) +{ + int k, ty, env, hasenv, vararg; + Ref r; + + hasenv = 0; + vararg = 0; + expect(Tlparen); + while (peek() != Trparen) { + if (curi - insb >= NIns) + err("too many instructions"); + if (!arg && vararg) + err("no parameters allowed after '...'"); + switch (peek()) { + case Tdots: + if (vararg) + err("only one '...' 
allowed"); + vararg = 1; + if (arg) { + *curi = (Ins){.op = Oargv}; + curi++; + } + next(); + goto Next; + case Tenv: + if (hasenv) + err("only one environment allowed"); + hasenv = 1; + env = 1; + next(); + k = Kl; + break; + default: + env = 0; + k = parsecls(&ty); + break; + } + r = parseref(); + if (req(r, R)) + err("invalid argument"); + if (!arg && rtype(r) != RTmp) + err("invalid function parameter"); + if (env) + if (arg) + *curi = (Ins){Oarge, k, R, {r}}; + else + *curi = (Ins){Opare, k, r, {R}}; + else if (k == Kc) + if (arg) + *curi = (Ins){Oargc, Kl, R, {TYPE(ty), r}}; + else + *curi = (Ins){Oparc, Kl, r, {TYPE(ty)}}; + else if (k >= Ksb) + if (arg) + *curi = (Ins){Oargsb+(k-Ksb), Kw, R, {r}}; + else + *curi = (Ins){Oparsb+(k-Ksb), Kw, r, {R}}; + else + if (arg) + *curi = (Ins){Oarg, k, R, {r}}; + else + *curi = (Ins){Opar, k, r, {R}}; + curi++; + Next: + if (peek() == Trparen) + break; + expect(Tcomma); + } + expect(Trparen); + return vararg; +} + +static Blk * +findblk(char *name) +{ + Blk *b; + uint32_t h; + + h = hash(name) & BMask; + for (b=blkh[h]; b; b=b->dlink) + if (strcmp(b->name, name) == 0) + return b; + b = newblk(); + b->id = nblk++; + strcpy(b->name, name); + b->dlink = blkh[h]; + blkh[h] = b; + return b; +} + +static void +closeblk() +{ + idup(curb, insb, curi-insb); + blink = &curb->link; + curi = insb; +} + +static PState +parseline(PState ps) +{ + Ref arg[NPred] = {R}; + Blk *blk[NPred]; + Phi *phi; + Ref r; + Blk *b; + Con *c; + int t, op, i, k, ty; + + t = nextnl(); + if (ps == PLbl && t != Tlbl && t != Trbrace) + err("label or } expected"); + switch (t) { + case Ttmp: + r = tmpref(tokval.str); + expect(Teq); + k = parsecls(&ty); + op = next(); + break; + default: + if (isstore(t)) { + case Tblit: + case Tcall: + case Ovastart: + /* operations without result */ + r = R; + k = Kw; + op = t; + break; + } + err("label, instruction or jump expected"); + case Trbrace: + return PEnd; + case Tlbl: + b = findblk(tokval.str); + if (curb && 
curb->jmp.type == Jxxx) { + closeblk(); + curb->jmp.type = Jjmp; + curb->s1 = b; + } + if (b->jmp.type != Jxxx) + err("multiple definitions of block @%s", b->name); + *blink = b; + curb = b; + plink = &curb->phi; + expect(Tnl); + return PPhi; + case Tret: + curb->jmp.type = Jretw + rcls; + if (peek() == Tnl) + curb->jmp.type = Jret0; + else if (rcls != K0) { + r = parseref(); + if (req(r, R)) + err("invalid return value"); + curb->jmp.arg = r; + } + goto Close; + case Tjmp: + curb->jmp.type = Jjmp; + goto Jump; + case Tjnz: + curb->jmp.type = Jjnz; + r = parseref(); + if (req(r, R)) + err("invalid argument for jnz jump"); + curb->jmp.arg = r; + expect(Tcomma); + Jump: + expect(Tlbl); + curb->s1 = findblk(tokval.str); + if (curb->jmp.type != Jjmp) { + expect(Tcomma); + expect(Tlbl); + curb->s2 = findblk(tokval.str); + } + if (curb->s1 == curf->start || curb->s2 == curf->start) + err("invalid jump to the start block"); + goto Close; + case Thlt: + curb->jmp.type = Jhlt; + Close: + expect(Tnl); + closeblk(); + return PLbl; + case Odbgloc: + op = t; + k = Kw; + r = R; + expect(Tint); + arg[0] = INT(tokval.num); + if (arg[0].val != tokval.num) + err("line number too big"); + if (peek() == Tcomma) { + next(); + expect(Tint); + arg[1] = INT(tokval.num); + if (arg[1].val != tokval.num) + err("column number too big"); + } else + arg[1] = INT(0); + goto Ins; + } + if (op == Tcall) { + curf->leaf = 0; + arg[0] = parseref(); + parserefl(1); + op = Ocall; + expect(Tnl); + if (k == Kc) { + k = Kl; + arg[1] = TYPE(ty); + } + if (k >= Ksb) + k = Kw; + goto Ins; + } + if (op == Tloadw) + op = Oloadsw; + if (op >= Tloadl && op <= Tloadd) + op = Oload; + if (op == Talloc1 || op == Talloc2) + op = Oalloc; + if (op == Ovastart && !curf->vararg) + err("cannot use vastart in non-variadic function"); + if (k >= Ksb) + err("size class must be w, l, s, or d"); + i = 0; + if (peek() != Tnl) + for (;;) { + if (i == NPred) + err("too many arguments"); + if (op == Tphi) { + expect(Tlbl); + 
blk[i] = findblk(tokval.str); + } + arg[i] = parseref(); + if (req(arg[i], R)) + err("invalid instruction argument"); + i++; + t = peek(); + if (t == Tnl) + break; + if (t != Tcomma) + err(", or end of line expected"); + next(); + } + next(); + switch (op) { + case Tphi: + if (ps != PPhi || curb == curf->start) + err("unexpected phi instruction"); + phi = alloc(sizeof *phi); + phi->to = r; + phi->cls = k; + phi->arg = vnew(i, sizeof arg[0], PFn); + memcpy(phi->arg, arg, i * sizeof arg[0]); + phi->blk = vnew(i, sizeof blk[0], PFn); + memcpy(phi->blk, blk, i * sizeof blk[0]); + phi->narg = i; + *plink = phi; + plink = &phi->link; + return PPhi; + case Tblit: + if (curi - insb >= NIns-1) + err("too many instructions"); + memset(curi, 0, 2 * sizeof(Ins)); + curi->op = Oblit0; + curi->arg[0] = arg[0]; + curi->arg[1] = arg[1]; + curi++; + if (rtype(arg[2]) != RCon) + err("blit size must be constant"); + c = &curf->con[arg[2].val]; + r = INT(c->bits.i); + if (c->type != CBits + || rsval(r) < 0 + || rsval(r) != c->bits.i) + err("invalid blit size"); + curi->op = Oblit1; + curi->arg[0] = r; + curi++; + return PIns; + default: + if (op >= NPubOp) + err("invalid instruction"); + Ins: + if (curi - insb >= NIns) + err("too many instructions"); + curi->op = op; + curi->cls = k; + curi->to = r; + curi->arg[0] = arg[0]; + curi->arg[1] = arg[1]; + curi++; + return PIns; + } +} + +static int +usecheck(Ref r, int k, Fn *fn) +{ + return rtype(r) != RTmp || fn->tmp[r.val].cls == k + || (fn->tmp[r.val].cls == Kl && k == Kw); +} + +static void +typecheck(Fn *fn) +{ + Blk *b; + Phi *p; + Ins *i; + uint n; + int k; + Tmp *t; + Ref r; + BSet pb[1], ppb[1]; + + fillpreds(fn); + bsinit(pb, fn->nblk); + bsinit(ppb, fn->nblk); + for (b=fn->start; b; b=b->link) { + for (p=b->phi; p; p=p->link) + fn->tmp[p->to.val].cls = p->cls; + for (i=b->ins; i<&b->ins[b->nins]; i++) + if (rtype(i->to) == RTmp) { + t = &fn->tmp[i->to.val]; + if (clsmerge(&t->cls, i->cls)) + err("temporary %%%s is assigned 
with" + " multiple types", t->name); + } + } + for (b=fn->start; b; b=b->link) { + bszero(pb); + for (n=0; nnpred; n++) + bsset(pb, b->pred[n]->id); + for (p=b->phi; p; p=p->link) { + bszero(ppb); + t = &fn->tmp[p->to.val]; + for (n=0; nnarg; n++) { + k = t->cls; + if (bshas(ppb, p->blk[n]->id)) + err("multiple entries for @%s in phi %%%s", + p->blk[n]->name, t->name); + if (!usecheck(p->arg[n], k, fn)) + err("invalid type for operand %%%s in phi %%%s", + fn->tmp[p->arg[n].val].name, t->name); + bsset(ppb, p->blk[n]->id); + } + if (!bsequal(pb, ppb)) + err("predecessors not matched in phi %%%s", t->name); + } + for (i=b->ins; i<&b->ins[b->nins]; i++) + for (n=0; n<2; n++) { + k = optab[i->op].argcls[n][i->cls]; + r = i->arg[n]; + t = &fn->tmp[r.val]; + if (k == Ke) + err("invalid instruction type in %s", + optab[i->op].name); + if (rtype(r) == RType) + continue; + if (rtype(r) != -1 && k == Kx) + err("no %s operand expected in %s", + n == 1 ? "second" : "first", + optab[i->op].name); + if (rtype(r) == -1 && k != Kx) + err("missing %s operand in %s", + n == 1 ? "second" : "first", + optab[i->op].name); + if (!usecheck(r, k, fn)) + err("invalid type for %s operand %%%s in %s", + n == 1 ? 
"second" : "first", + t->name, optab[i->op].name); + } + r = b->jmp.arg; + if (isret(b->jmp.type)) { + if (b->jmp.type == Jretc) + k = Kl; + else if (b->jmp.type >= Jretsb) + k = Kw; + else + k = b->jmp.type - Jretw; + if (!usecheck(r, k, fn)) + goto JErr; + } + if (b->jmp.type == Jjnz && !usecheck(r, Kw, fn)) + JErr: + err("invalid type for jump argument %%%s in block @%s", + fn->tmp[r.val].name, b->name); + if (b->s1 && b->s1->jmp.type == Jxxx) + err("block @%s is used undefined", b->s1->name); + if (b->s2 && b->s2->jmp.type == Jxxx) + err("block @%s is used undefined", b->s2->name); + } +} + +static Fn * +parsefn(Lnk *lnk) +{ + Blk *b; + int i; + PState ps; + + curb = 0; + nblk = 0; + curi = insb; + curf = alloc(sizeof *curf); + curf->ntmp = 0; + curf->ncon = 2; + curf->tmp = vnew(curf->ntmp, sizeof curf->tmp[0], PFn); + curf->con = vnew(curf->ncon, sizeof curf->con[0], PFn); + for (i=0; icon[0].type = CBits; + curf->con[0].bits.i = 0xdeaddead; /* UNDEF */ + curf->con[1].type = CBits; + curf->lnk = *lnk; + curf->leaf = 1; + blink = &curf->start; + curf->retty = Kx; + if (peek() != Tglo) + rcls = parsecls(&curf->retty); + else + rcls = K0; + if (next() != Tglo) + err("function name expected"); + strncpy(curf->name, tokval.str, NString-1); + curf->vararg = parserefl(0); + if (nextnl() != Tlbrace) + err("function body must start with {"); + ps = PLbl; + do + ps = parseline(ps); + while (ps != PEnd); + if (!curb) + err("empty function"); + if (curb->jmp.type == Jxxx) + err("last block misses jump"); + curf->mem = vnew(0, sizeof curf->mem[0], PFn); + curf->nmem = 0; + curf->nblk = nblk; + curf->rpo = vnew(nblk, sizeof curf->rpo[0], PFn); + for (b=curf->start; b; b=b->link) + b->dlink = 0; /* was trashed by findblk() */ + for (i=0; ialign; + while (t != Trbrace) { + ty1 = 0; + switch (t) { + default: err("invalid type member specifier"); + case Td: type = Fd; s = 8; a = 3; break; + case Tl: type = Fl; s = 8; a = 3; break; + case Ts: type = Fs; s = 4; a = 2; break; + 
case Tw: type = Fw; s = 4; a = 2; break; + case Th: type = Fh; s = 2; a = 1; break; + case Tb: type = Fb; s = 1; a = 0; break; + case Ttyp: + type = FTyp; + ty1 = &typ[findtyp(ntyp-1)]; + s = ty1->size; + a = ty1->align; + break; + } + if (a > al) + al = a; + a = (1 << a) - 1; + a = ((sz + a) & ~a) - sz; + if (a) { + if (n < NField) { + /* padding */ + fld[n].type = FPad; + fld[n].len = a; + n++; + } + } + t = nextnl(); + if (t == Tint) { + c = tokval.num; + t = nextnl(); + } else + c = 1; + sz += a + c*s; + if (type == FTyp) + s = ty1 - typ; + for (; c>0 && nsize) + sz = ty->size; + ty->size = (sz + a - 1) & -a; + ty->align = al; +} + +static void +parsetyp() +{ + Typ *ty; + int t, al; + uint n; + + /* be careful if extending the syntax + * to handle nested types, any pointer + * held to typ[] might be invalidated! + */ + vgrow(&typ, ntyp+1); + ty = &typ[ntyp++]; + ty->isdark = 0; + ty->isunion = 0; + ty->align = -1; + ty->size = 0; + if (nextnl() != Ttyp || nextnl() != Teq) + err("type name and then = expected"); + strcpy(ty->name, tokval.str); + t = nextnl(); + if (t == Talign) { + if (nextnl() != Tint) + err("alignment expected"); + for (al=0; tokval.num /= 2; al++) + ; + ty->align = al; + t = nextnl(); + } + if (t != Tlbrace) + err("type body must start with {"); + t = nextnl(); + if (t == Tint) { + ty->isdark = 1; + ty->size = tokval.num; + if (ty->align == -1) + err("dark types need alignment"); + if (nextnl() != Trbrace) + err("} expected"); + return; + } + n = 0; + ty->fields = vnew(1, sizeof ty->fields[0], PHeap); + if (t == Tlbrace) { + ty->isunion = 1; + do { + if (t != Tlbrace) + err("invalid union member"); + vgrow(&ty->fields, n+1); + parsefields(ty->fields[n++], ty, nextnl()); + t = nextnl(); + } while (t != Trbrace); + } else + parsefields(ty->fields[n++], ty, t); + ty->nunion = n; +} + +static void +parsedatref(Dat *d) +{ + int t; + + d->isref = 1; + d->u.ref.name = tokval.str; + d->u.ref.off = 0; + t = peek(); + if (t == Tplus) { + next(); + if 
(next() != Tint) + err("invalid token after offset in ref"); + d->u.ref.off = tokval.num; + } +} + +static void +parsedatstr(Dat *d) +{ + d->isstr = 1; + d->u.str = tokval.str; +} + +static void +parsedat(void cb(Dat *), Lnk *lnk) +{ + char name[NString] = {0}; + int t; + Dat d; + + if (nextnl() != Tglo || nextnl() != Teq) + err("data name, then = expected"); + strncpy(name, tokval.str, NString-1); + t = nextnl(); + lnk->align = 8; + if (t == Talign) { + if (nextnl() != Tint) + err("alignment expected"); + if (tokval.num <= 0 || tokval.num > CHAR_MAX + || (tokval.num & (tokval.num-1)) != 0) + err("invalid alignment"); + lnk->align = tokval.num; + t = nextnl(); + } + d.type = DStart; + d.name = name; + d.lnk = lnk; + cb(&d); + + if (t != Tlbrace) + err("expected data contents in { .. }"); + for (;;) { + switch (nextnl()) { + default: err("invalid size specifier %c in data", tokval.chr); + case Trbrace: goto Done; + case Tl: d.type = DL; break; + case Tw: d.type = DW; break; + case Th: d.type = DH; break; + case Tb: d.type = DB; break; + case Ts: d.type = DW; break; + case Td: d.type = DL; break; + case Tz: d.type = DZ; break; + } + t = nextnl(); + do { + d.isstr = 0; + d.isref = 0; + memset(&d.u, 0, sizeof d.u); + if (t == Tflts) + d.u.flts = tokval.flts; + else if (t == Tfltd) + d.u.fltd = tokval.fltd; + else if (t == Tint) + d.u.num = tokval.num; + else if (t == Tglo) + parsedatref(&d); + else if (t == Tstr) + parsedatstr(&d); + else + err("constant literal expected"); + cb(&d); + t = nextnl(); + } while (t == Tint || t == Tflts || t == Tfltd || t == Tstr || t == Tglo); + if (t == Trbrace) + break; + if (t != Tcomma) + err(", or } expected"); + } +Done: + d.type = DEnd; + cb(&d); +} + +static int +parselnk(Lnk *lnk) +{ + int t, haslnk; + + for (haslnk=0;; haslnk=1) + switch ((t=nextnl())) { + case Texport: + lnk->export = 1; + break; + case Tthread: + lnk->thread = 1; + break; + case Tcommon: + lnk->common = 1; + break; + case Tsection: + if (lnk->sec) + err("only 
one section allowed"); + if (next() != Tstr) + err("section \"name\" expected"); + lnk->sec = tokval.str; + if (peek() == Tstr) { + next(); + lnk->secf = tokval.str; + } + break; + default: + if (t == Tfunc && lnk->thread) + err("only data may have thread linkage"); + if (haslnk && t != Tdata && t != Tfunc) + err("only data and function have linkage"); + return t; + } +} + +void +parse(FILE *f, char *path, void dbgfile(char *), void data(Dat *), void func(Fn *)) +{ + Lnk lnk; + uint n; + + lexinit(); + inf = f; + inpath = path; + lnum = 1; + thead = Txxx; + ntyp = 0; + typ = vnew(0, sizeof typ[0], PHeap); + for (;;) { + lnk = (Lnk){0}; + switch (parselnk(&lnk)) { + default: + err("top-level definition expected"); + case Tdbgfile: + expect(Tstr); + dbgfile(tokval.str); + break; + case Tfunc: + lnk.align = 16; + func(parsefn(&lnk)); + break; + case Tdata: + parsedat(data, &lnk); + break; + case Ttype: + parsetyp(); + break; + case Teof: + for (n=0; ntype) { + case CUndef: + break; + case CAddr: + if (c->sym.type == SThr) + fprintf(f, "thread "); + fprintf(f, "$%s", str(c->sym.id)); + if (c->bits.i) + fprintf(f, "%+"PRIi64, c->bits.i); + break; + case CBits: + if (c->flt == 1) + fprintf(f, "s_%f", c->bits.s); + else if (c->flt == 2) + fprintf(f, "d_%lf", c->bits.d); + else + fprintf(f, "%"PRIi64, c->bits.i); + break; + } +} + +void +printref(Ref r, Fn *fn, FILE *f) +{ + int i; + Mem *m; + + switch (rtype(r)) { + case RTmp: + if (r.val < Tmp0) + fprintf(f, "R%d", r.val); + else + fprintf(f, "%%%s", fn->tmp[r.val].name); + break; + case RCon: + if (req(r, UNDEF)) + fprintf(f, "UNDEF"); + else + printcon(&fn->con[r.val], f); + break; + case RSlot: + fprintf(f, "S%d", rsval(r)); + break; + case RCall: + fprintf(f, "%04x", r.val); + break; + case RType: + fprintf(f, ":%s", typ[r.val].name); + break; + case RMem: + i = 0; + m = &fn->mem[r.val]; + fputc('[', f); + if (m->offset.type != CUndef) { + printcon(&m->offset, f); + i = 1; + } + if (!req(m->base, R)) { + if (i) + 
fprintf(f, " + "); + printref(m->base, fn, f); + i = 1; + } + if (!req(m->index, R)) { + if (i) + fprintf(f, " + "); + fprintf(f, "%d * ", m->scale); + printref(m->index, fn, f); + } + fputc(']', f); + break; + case RInt: + fprintf(f, "%d", rsval(r)); + break; + case -1: + fprintf(f, "R"); + break; + } +} + +void +printfn(Fn *fn, FILE *f) +{ + static char ktoc[] = "wlsd"; + static char *jtoa[NJmp] = { + #define X(j) [J##j] = #j, + JMPS(X) + #undef X + }; + Blk *b; + Phi *p; + Ins *i; + uint n; + + fprintf(f, "function $%s() {\n", fn->name); + for (b=fn->start; b; b=b->link) { + fprintf(f, "@%s\n", b->name); + for (p=b->phi; p; p=p->link) { + fprintf(f, "\t"); + printref(p->to, fn, f); + fprintf(f, " =%c phi ", ktoc[p->cls]); + assert(p->narg); + for (n=0;; n++) { + fprintf(f, "@%s ", p->blk[n]->name); + printref(p->arg[n], fn, f); + if (n == p->narg-1) { + fprintf(f, "\n"); + break; + } else + fprintf(f, ", "); + } + } + for (i=b->ins; i<&b->ins[b->nins]; i++) { + fprintf(f, "\t"); + if (!req(i->to, R)) { + printref(i->to, fn, f); + fprintf(f, " =%c ", ktoc[i->cls]); + } + assert(optab[i->op].name); + fprintf(f, "%s", optab[i->op].name); + if (req(i->to, R)) + switch (i->op) { + case Oarg: + case Oswap: + case Oxcmp: + case Oacmp: + case Oacmn: + case Oafcmp: + case Oxtest: + case Oxdiv: + case Oxidiv: + fputc(ktoc[i->cls], f); + } + if (!req(i->arg[0], R)) { + fprintf(f, " "); + printref(i->arg[0], fn, f); + } + if (!req(i->arg[1], R)) { + fprintf(f, ", "); + printref(i->arg[1], fn, f); + } + fprintf(f, "\n"); + } + switch (b->jmp.type) { + case Jret0: + case Jretsb: + case Jretub: + case Jretsh: + case Jretuh: + case Jretw: + case Jretl: + case Jrets: + case Jretd: + case Jretc: + fprintf(f, "\t%s", jtoa[b->jmp.type]); + if (b->jmp.type != Jret0 || !req(b->jmp.arg, R)) { + fprintf(f, " "); + printref(b->jmp.arg, fn, f); + } + if (b->jmp.type == Jretc) + fprintf(f, ", :%s", typ[fn->retty].name); + fprintf(f, "\n"); + break; + case Jhlt: + fprintf(f, "\thlt\n"); + 
break; + case Jjmp: + if (b->s1 != b->link) + fprintf(f, "\tjmp @%s\n", b->s1->name); + break; + default: + fprintf(f, "\t%s ", jtoa[b->jmp.type]); + if (b->jmp.type == Jjnz) { + printref(b->jmp.arg, fn, f); + fprintf(f, ", "); + } + assert(b->s1 && b->s2); + fprintf(f, "@%s, @%s\n", b->s1->name, b->s2->name); + break; + } + } + fprintf(f, "}\n"); +} diff --git a/src/qbe/rega.c b/src/qbe/rega.c new file mode 100644 index 00000000..ab98cd67 --- /dev/null +++ b/src/qbe/rega.c @@ -0,0 +1,696 @@ +#include "all.h" + +#ifdef TEST_PMOV + #undef assert + #define assert(x) assert_test(#x, x) +#endif + +typedef struct RMap RMap; + +struct RMap { + int t[Tmp0]; + int r[Tmp0]; + int w[Tmp0]; /* wait list, for unmatched hints */ + BSet b[1]; + int n; +}; + +static bits regu; /* registers used */ +static Tmp *tmp; /* function temporaries */ +static Mem *mem; /* function mem references */ +static struct { + Ref src, dst; + int cls; +} pm[Tmp0]; /* parallel move constructed */ +static int npm; /* size of pm */ +static int loop; /* current loop level */ + +static uint stmov; /* stats: added moves */ +static uint stblk; /* stats: added blocks */ + +static int * +hint(int t) +{ + return &tmp[phicls(t, tmp)].hint.r; +} + +static void +sethint(int t, int r) +{ + Tmp *p; + + p = &tmp[phicls(t, tmp)]; + if (p->hint.r == -1 || p->hint.w > loop) { + p->hint.r = r; + p->hint.w = loop; + tmp[t].visit = -1; + } +} + +static void +rcopy(RMap *ma, RMap *mb) +{ + memcpy(ma->t, mb->t, sizeof ma->t); + memcpy(ma->r, mb->r, sizeof ma->r); + memcpy(ma->w, mb->w, sizeof ma->w); + bscopy(ma->b, mb->b); + ma->n = mb->n; +} + +static int +rfind(RMap *m, int t) +{ + int i; + + for (i=0; in; i++) + if (m->t[i] == t) + return m->r[i]; + return -1; +} + +static Ref +rref(RMap *m, int t) +{ + int r, s; + + r = rfind(m, t); + if (r == -1) { + s = tmp[t].slot; + assert(s != -1 && "should have spilled"); + return SLOT(s); + } else + return TMP(r); +} + +static void +radd(RMap *m, int t, int r) +{ + assert((t >= 
Tmp0 || t == r) && "invalid temporary"); + assert(((T.gpr0 <= r && r < T.gpr0 + T.ngpr) + || (T.fpr0 <= r && r < T.fpr0 + T.nfpr)) + && "invalid register"); + assert(!bshas(m->b, t) && "temporary has mapping"); + assert(!bshas(m->b, r) && "register already allocated"); + assert(m->n <= T.ngpr+T.nfpr && "too many mappings"); + bsset(m->b, t); + bsset(m->b, r); + m->t[m->n] = t; + m->r[m->n] = r; + m->n++; + regu |= BIT(r); +} + +static Ref +ralloctry(RMap *m, int t, int try) +{ + bits regs; + int h, r, r0, r1; + + if (t < Tmp0) { + assert(bshas(m->b, t)); + return TMP(t); + } + if (bshas(m->b, t)) { + r = rfind(m, t); + assert(r != -1); + return TMP(r); + } + r = tmp[t].visit; + if (r == -1 || bshas(m->b, r)) + r = *hint(t); + if (r == -1 || bshas(m->b, r)) { + if (try) + return R; + regs = tmp[phicls(t, tmp)].hint.m; + regs |= m->b->t[0]; + if (KBASE(tmp[t].cls) == 0) { + r0 = T.gpr0; + r1 = r0 + T.ngpr; + } else { + r0 = T.fpr0; + r1 = r0 + T.nfpr; + } + for (r=r0; rb, r)) + goto Found; + die("no more regs"); + } +Found: + radd(m, t, r); + sethint(t, r); + tmp[t].visit = r; + h = *hint(t); + if (h != -1 && h != r) + m->w[h] = t; + return TMP(r); +} + +static inline Ref +ralloc(RMap *m, int t) +{ + return ralloctry(m, t, 0); +} + +static int +rfree(RMap *m, int t) +{ + int i, r; + + assert(t >= Tmp0 || !(BIT(t) & T.rglob)); + if (!bshas(m->b, t)) + return -1; + for (i=0; m->t[i] != t; i++) + assert(i+1 < m->n); + r = m->r[i]; + bsclr(m->b, t); + bsclr(m->b, r); + m->n--; + memmove(&m->t[i], &m->t[i+1], (m->n-i) * sizeof m->t[0]); + memmove(&m->r[i], &m->r[i+1], (m->n-i) * sizeof m->r[0]); + assert(t >= Tmp0 || t == r); + return r; +} + +static void +mdump(RMap *m) +{ + int i; + + for (i=0; in; i++) + if (m->t[i] >= Tmp0) + fprintf(stderr, " (%s, R%d)", + tmp[m->t[i]].name, + m->r[i]); + fprintf(stderr, "\n"); +} + +static void +pmadd(Ref src, Ref dst, int k) +{ + if (npm == Tmp0) + die("cannot have more moves than registers"); + pm[npm].src = src; + pm[npm].dst = 
dst; + pm[npm].cls = k; + npm++; +} + +enum PMStat { ToMove, Moving, Moved }; + +static int +pmrec(enum PMStat *status, int i, int *k) +{ + int j, c; + + /* note, this routine might emit + * too many large instructions + */ + if (req(pm[i].src, pm[i].dst)) { + status[i] = Moved; + return -1; + } + assert(KBASE(pm[i].cls) == KBASE(*k)); + assert((Kw|Kl) == Kl && (Ks|Kd) == Kd); + *k |= pm[i].cls; + for (j=0; jb, r)) { + /* r is used and not by to */ + assert(r1 != r); + for (n=0; m->r[n] != r; n++) + assert(n+1 < m->n); + t = m->t[n]; + rfree(m, t); + bsset(m->b, r); + ralloc(m, t); + bsclr(m->b, r); + } + t = req(to, R) ? r : to.val; + radd(m, t, r); +} + +static int +regcpy(Ins *i) +{ + return i->op == Ocopy && isreg(i->arg[0]); +} + +static Ins * +dopm(Blk *b, Ins *i, RMap *m) +{ + RMap m0; + int n, r, r1, t, s; + Ins *i1, *ip; + bits def; + + m0 = *m; /* okay since we don't use m0.b */ + m0.b->t = 0; + i1 = ++i; + do { + i--; + move(i->arg[0].val, i->to, m); + } while (i != b->ins && regcpy(i-1)); + assert(m0.n <= m->n); + if (i != b->ins && (i-1)->op == Ocall) { + def = T.retregs((i-1)->arg[1], 0) | T.rglob; + for (r=0; T.rsave[r]>=0; r++) + if (!(BIT(T.rsave[r]) & def)) + move(T.rsave[r], R, m); + } + for (npm=0, n=0; nn; n++) { + t = m->t[n]; + s = tmp[t].slot; + r1 = m->r[n]; + r = rfind(&m0, t); + if (r != -1) + pmadd(TMP(r1), TMP(r), tmp[t].cls); + else if (s != -1) + pmadd(TMP(r1), SLOT(s), tmp[t].cls); + } + for (ip=i; ipto, R)) + rfree(m, ip->to.val); + r = ip->arg[0].val; + if (rfind(m, r) == -1) + radd(m, r, r); + } + pmgen(); + return i; +} + +static int +prio1(Ref r1, Ref r2) +{ + /* trivial heuristic to begin with, + * later we can use the distance to + * the definition instruction + */ + (void) r2; + return *hint(r1.val) != -1; +} + +static void +insert(Ref *r, Ref **rs, int p) +{ + int i; + + rs[i = p] = r; + while (i-- > 0 && prio1(*r, *rs[i])) { + rs[i+1] = rs[i]; + rs[i] = r; + } +} + +static void +doblk(Blk *b, RMap *cur) +{ + int t, x, r, 
rf, rt, nr; + bits rs; + Ins *i, *i1; + Mem *m; + Ref *ra[4]; + + if (rtype(b->jmp.arg) == RTmp) + b->jmp.arg = ralloc(cur, b->jmp.arg.val); + curi = &insb[NIns]; + for (i1=&b->ins[b->nins]; i1!=b->ins;) { + emiti(*--i1); + i = curi; + rf = -1; + switch (i->op) { + case Ocall: + rs = T.argregs(i->arg[1], 0) | T.rglob; + for (r=0; T.rsave[r]>=0; r++) + if (!(BIT(T.rsave[r]) & rs)) + rfree(cur, T.rsave[r]); + break; + case Ocopy: + if (regcpy(i)) { + curi++; + i1 = dopm(b, i1, cur); + stmov += i+1 - curi; + continue; + } + if (isreg(i->to)) + if (rtype(i->arg[0]) == RTmp) + sethint(i->arg[0].val, i->to.val); + /* fall through */ + default: + if (!req(i->to, R)) { + assert(rtype(i->to) == RTmp); + r = i->to.val; + if (r < Tmp0 && (BIT(r) & T.rglob)) + break; + rf = rfree(cur, r); + if (rf == -1) { + assert(!isreg(i->to)); + curi++; + continue; + } + i->to = TMP(rf); + } + break; + } + for (x=0, nr=0; x<2; x++) + switch (rtype(i->arg[x])) { + case RMem: + m = &mem[i->arg[x].val]; + if (rtype(m->base) == RTmp) + insert(&m->base, ra, nr++); + if (rtype(m->index) == RTmp) + insert(&m->index, ra, nr++); + break; + case RTmp: + insert(&i->arg[x], ra, nr++); + break; + } + for (r=0; rval); + if (i->op == Ocopy && req(i->to, i->arg[0])) + curi++; + + /* try to change the register of a hinted + * temporary if rf is available */ + if (rf != -1 && (t = cur->w[rf]) != 0) + if (!bshas(cur->b, rf) && *hint(t) == rf + && (rt = rfree(cur, t)) != -1) { + tmp[t].visit = -1; + ralloc(cur, t); + assert(bshas(cur->b, rf)); + emit(Ocopy, tmp[t].cls, TMP(rt), TMP(rf), R); + stmov += 1; + cur->w[rf] = 0; + for (r=0; rloop == bb->loop) + return ba->id > bb->id ? -1 : ba->id < bb->id; + return ba->loop > bb->loop ? -1 : +1; +} + +/* comparison function to order temporaries + * for allocation at the end of blocks */ +static int +prio2(int t1, int t2) +{ + if ((tmp[t1].visit ^ tmp[t2].visit) < 0) /* != signs */ + return tmp[t1].visit != -1 ? 
+1 : -1; + if ((*hint(t1) ^ *hint(t2)) < 0) + return *hint(t1) != -1 ? +1 : -1; + return tmp[t1].cost - tmp[t2].cost; +} + +/* register allocation + * depends on rpo, phi, cost, (and obviously spill) + */ +void +rega(Fn *fn) +{ + int j, t, r, x, rl[Tmp0]; + Blk *b, *b1, *s, ***ps, *blist, **blk, **bp; + RMap *end, *beg, cur, old, *m; + Ins *i; + Phi *p; + uint u, n; + Ref src, dst; + + /* 1. setup */ + stmov = 0; + stblk = 0; + regu = 0; + tmp = fn->tmp; + mem = fn->mem; + blk = alloc(fn->nblk * sizeof blk[0]); + end = alloc(fn->nblk * sizeof end[0]); + beg = alloc(fn->nblk * sizeof beg[0]); + for (n=0; nnblk; n++) { + bsinit(end[n].b, fn->ntmp); + bsinit(beg[n].b, fn->ntmp); + } + bsinit(cur.b, fn->ntmp); + bsinit(old.b, fn->ntmp); + + loop = INT_MAX; + for (t=0; tntmp; t++) { + tmp[t].hint.r = t < Tmp0 ? t : -1; + tmp[t].hint.w = loop; + tmp[t].visit = -1; + } + for (bp=blk, b=fn->start; b; b=b->link) + *bp++ = b; + qsort(blk, fn->nblk, sizeof blk[0], carve); + for (b=fn->start, i=b->ins; i<&b->ins[b->nins]; i++) + if (i->op != Ocopy || !isreg(i->arg[0])) + break; + else { + assert(rtype(i->to) == RTmp); + sethint(i->to.val, i->arg[0].val); + } + + /* 2. assign registers */ + for (bp=blk; bp<&blk[fn->nblk]; bp++) { + b = *bp; + n = b->id; + loop = b->loop; + cur.n = 0; + bszero(cur.b); + memset(cur.w, 0, sizeof cur.w); + for (x=0, t=Tmp0; bsiter(b->out, &t); t++) { + j = x++; + rl[j] = t; + while (j-- > 0 && prio2(t, rl[j]) > 0) { + rl[j+1] = rl[j]; + rl[j] = t; + } + } + for (r=0; bsiter(b->out, &r) && rin, cur.b); + for (p=b->phi; p; p=p->link) + if (rtype(p->to) == RTmp) + bsclr(b->in, p->to.val); + rcopy(&beg[n], &cur); + } + + /* 3. 
emit copies shared by multiple edges + * to the same block */ + for (s=fn->start; s; s=s->link) { + if (s->npred <= 1) + continue; + m = &beg[s->id]; + + /* rl maps a register that is live at the + * beginning of s to the one used in all + * predecessors (if any, -1 otherwise) */ + memset(rl, 0, sizeof rl); + + /* to find the register of a phi in a + * predecessor, we have to find the + * corresponding argument */ + for (p=s->phi; p; p=p->link) { + if (rtype(p->to) != RTmp + || (r=rfind(m, p->to.val)) == -1) + continue; + for (u=0; unarg; u++) { + b = p->blk[u]; + src = p->arg[u]; + if (rtype(src) != RTmp) + continue; + x = rfind(&end[b->id], src.val); + if (x == -1) /* spilled */ + continue; + rl[r] = (!rl[r] || rl[r] == x) ? x : -1; + } + if (rl[r] == 0) + rl[r] = -1; + } + + /* process non-phis temporaries */ + for (j=0; jn; j++) { + t = m->t[j]; + r = m->r[j]; + if (rl[r] || t < Tmp0 /* todo, remove this */) + continue; + for (bp=s->pred; bp<&s->pred[s->npred]; bp++) { + x = rfind(&end[(*bp)->id], t); + if (x == -1) /* spilled */ + continue; + rl[r] = (!rl[r] || rl[r] == x) ? x : -1; + } + if (rl[r] == 0) + rl[r] = -1; + } + + npm = 0; + for (j=0; jn; j++) { + t = m->t[j]; + r = m->r[j]; + x = rl[r]; + assert(x != 0 || t < Tmp0 /* todo, ditto */); + if (x > 0 && !bshas(m->b, x)) { + pmadd(TMP(x), TMP(r), tmp[t].cls); + m->r[j] = x; + bsset(m->b, x); + } + } + curi = &insb[NIns]; + pmgen(); + j = &insb[NIns] - curi; + if (j == 0) + continue; + stmov += j; + s->nins += j; + i = alloc(s->nins * sizeof(Ins)); + icpy(icpy(i, curi, j), s->ins, s->nins-j); + s->ins = i; + } + + if (debug['R']) { + fprintf(stderr, "\n> Register mappings:\n"); + for (n=0; nnblk; n++) { + b = fn->rpo[n]; + fprintf(stderr, "\t%-10s beg", b->name); + mdump(&beg[n]); + fprintf(stderr, "\t end"); + mdump(&end[n]); + } + fprintf(stderr, "\n"); + } + + /* 4. 
emit remaining copies in new blocks */ + blist = 0; + for (b=fn->start;; b=b->link) { + ps = (Blk**[3]){&b->s1, &b->s2, (Blk*[1]){0}}; + for (; (s=**ps); ps++) { + npm = 0; + for (p=s->phi; p; p=p->link) { + dst = p->to; + assert(rtype(dst)==RSlot || rtype(dst)==RTmp); + if (rtype(dst) == RTmp) { + r = rfind(&beg[s->id], dst.val); + if (r == -1) + continue; + dst = TMP(r); + } + for (u=0; p->blk[u]!=b; u++) + assert(u+1 < p->narg); + src = p->arg[u]; + if (rtype(src) == RTmp) + src = rref(&end[b->id], src.val); + pmadd(src, dst, p->cls); + } + for (t=Tmp0; bsiter(s->in, &t); t++) { + src = rref(&end[b->id], t); + dst = rref(&beg[s->id], t); + pmadd(src, dst, tmp[t].cls); + } + curi = &insb[NIns]; + pmgen(); + if (curi == &insb[NIns]) + continue; + b1 = newblk(); + b1->loop = (b->loop+s->loop) / 2; + b1->link = blist; + blist = b1; + fn->nblk++; + strf(b1->name, "%s_%s", b->name, s->name); + stmov += &insb[NIns]-curi; + stblk += 1; + idup(b1, curi, &insb[NIns]-curi); + b1->jmp.type = Jjmp; + b1->s1 = s; + **ps = b1; + } + if (!b->link) { + b->link = blist; + break; + } + } + for (b=fn->start; b; b=b->link) + b->phi = 0; + fn->reg = regu; + + if (debug['R']) { + fprintf(stderr, "\n> Register allocation statistics:\n"); + fprintf(stderr, "\tnew moves: %d\n", stmov); + fprintf(stderr, "\tnew blocks: %d\n", stblk); + fprintf(stderr, "\n> After register allocation:\n"); + printfn(fn, stderr); + } +} diff --git a/src/qbe/rv64/abi.c b/src/qbe/rv64/abi.c new file mode 100644 index 00000000..1c51bfce --- /dev/null +++ b/src/qbe/rv64/abi.c @@ -0,0 +1,653 @@ +#include "all.h" + +/* the risc-v lp64d abi */ + +typedef struct Class Class; +typedef struct Insl Insl; +typedef struct Params Params; + +enum { + Cptr = 1, /* replaced by a pointer */ + Cstk1 = 2, /* pass first XLEN on the stack */ + Cstk2 = 4, /* pass second XLEN on the stack */ + Cstk = Cstk1 | Cstk2, + Cfpint = 8, /* float passed like integer */ +}; + +struct Class { + char class; + Typ *type; + int reg[2]; + int 
cls[2]; + int off[2]; + char ngp; /* only valid after typclass() */ + char nfp; /* ditto */ + char nreg; +}; + +struct Insl { + Ins i; + Insl *link; +}; + +struct Params { + int ngp; + int nfp; + int stk; /* stack offset for varargs */ +}; + +static int gpreg[10] = {A0, A1, A2, A3, A4, A5, A6, A7}; +static int fpreg[10] = {FA0, FA1, FA2, FA3, FA4, FA5, FA6, FA7}; + +/* layout of call's second argument (RCall) + * + * 29 12 8 4 2 0 + * |0.00|x|xxxx|xxxx|xx|xx| range + * | | | | ` gp regs returned (0..2) + * | | | ` fp regs returned (0..2) + * | | ` gp regs passed (0..8) + * | ` fp regs passed (0..8) + * ` env pointer passed in t5 (0..1) + */ + +bits +rv64_retregs(Ref r, int p[2]) +{ + bits b; + int ngp, nfp; + + assert(rtype(r) == RCall); + ngp = r.val & 3; + nfp = (r.val >> 2) & 3; + if (p) { + p[0] = ngp; + p[1] = nfp; + } + b = 0; + while (ngp--) + b |= BIT(A0+ngp); + while (nfp--) + b |= BIT(FA0+nfp); + return b; +} + +bits +rv64_argregs(Ref r, int p[2]) +{ + bits b; + int ngp, nfp, t5; + + assert(rtype(r) == RCall); + ngp = (r.val >> 4) & 15; + nfp = (r.val >> 8) & 15; + t5 = (r.val >> 12) & 1; + if (p) { + p[0] = ngp + t5; + p[1] = nfp; + } + b = 0; + while (ngp--) + b |= BIT(A0+ngp); + while (nfp--) + b |= BIT(FA0+nfp); + return b | ((bits)t5 << T5); +} + +static int +fpstruct(Typ *t, int off, Class *c) +{ + Field *f; + int n; + + if (t->isunion) + return -1; + + for (f=*t->fields; f->type != FEnd; f++) + if (f->type == FPad) + off += f->len; + else if (f->type == FTyp) { + if (fpstruct(&typ[f->len], off, c) == -1) + return -1; + } + else { + n = c->nfp + c->ngp; + if (n == 2) + return -1; + switch (f->type) { + default: die("unreachable"); + case Fb: + case Fh: + case Fw: c->cls[n] = Kw; c->ngp++; break; + case Fl: c->cls[n] = Kl; c->ngp++; break; + case Fs: c->cls[n] = Ks; c->nfp++; break; + case Fd: c->cls[n] = Kd; c->nfp++; break; + } + c->off[n] = off; + off += f->len; + } + + return c->nfp; +} + +static void +typclass(Class *c, Typ *t, int fpabi, int 
*gp, int *fp) +{ + uint n; + int i; + + c->type = t; + c->class = 0; + c->ngp = 0; + c->nfp = 0; + + if (t->align > 4) + err("alignments larger than 16 are not supported"); + + if (t->isdark || t->size > 16 || t->size == 0) { + /* large structs are replaced by a + * pointer to some caller-allocated + * memory + */ + c->class |= Cptr; + *c->cls = Kl; + *c->off = 0; + c->ngp = 1; + } + else if (!fpabi || fpstruct(t, 0, c) <= 0) { + for (n=0; 8*nsize; n++) { + c->cls[n] = Kl; + c->off[n] = 8*n; + } + c->nfp = 0; + c->ngp = n; + } + + c->nreg = c->nfp + c->ngp; + for (i=0; inreg; i++) + if (KBASE(c->cls[i]) == 0) + c->reg[i] = *gp++; + else + c->reg[i] = *fp++; +} + +static void +sttmps(Ref tmp[], int ntmp, Class *c, Ref mem, Fn *fn) +{ + static int st[] = { + [Kw] = Ostorew, [Kl] = Ostorel, + [Ks] = Ostores, [Kd] = Ostored + }; + int i; + Ref r; + + assert(ntmp > 0); + assert(ntmp <= 2); + for (i=0; icls[i], fn); + r = newtmp("abi", Kl, fn); + emit(st[c->cls[i]], 0, R, tmp[i], r); + emit(Oadd, Kl, r, mem, getcon(c->off[i], fn)); + } +} + +static void +ldregs(Class *c, Ref mem, Fn *fn) +{ + int i; + Ref r; + + for (i=0; inreg; i++) { + r = newtmp("abi", Kl, fn); + emit(Oload, c->cls[i], TMP(c->reg[i]), r, R); + emit(Oadd, Kl, r, mem, getcon(c->off[i], fn)); + } +} + +static void +selret(Blk *b, Fn *fn) +{ + int j, k, cty; + Ref r; + Class cr; + + j = b->jmp.type; + + if (!isret(j) || j == Jret0) + return; + + r = b->jmp.arg; + b->jmp.type = Jret0; + + if (j == Jretc) { + typclass(&cr, &typ[fn->retty], 1, gpreg, fpreg); + if (cr.class & Cptr) { + assert(rtype(fn->retr) == RTmp); + emit(Oblit1, 0, R, INT(cr.type->size), R); + emit(Oblit0, 0, R, r, fn->retr); + cty = 0; + } else { + ldregs(&cr, r, fn); + cty = (cr.nfp << 2) | cr.ngp; + } + } else { + k = j - Jretw; + if (KBASE(k) == 0) { + emit(Ocopy, k, TMP(A0), r, R); + cty = 1; + } else { + emit(Ocopy, k, TMP(FA0), r, R); + cty = 1 << 2; + } + } + + b->jmp.arg = CALL(cty); +} + +static int +argsclass(Ins *i0, Ins *i1, 
Class *carg, int retptr) +{ + int ngp, nfp, *gp, *fp, vararg, envc; + Class *c; + Typ *t; + Ins *i; + + gp = gpreg; + fp = fpreg; + ngp = 8; + nfp = 8; + vararg = 0; + envc = 0; + if (retptr) { + gp++; + ngp--; + } + for (i=i0, c=carg; iop) { + case Opar: + case Oarg: + *c->cls = i->cls; + if (!vararg && KBASE(i->cls) == 1 && nfp > 0) { + nfp--; + *c->reg = *fp++; + } else if (ngp > 0) { + if (KBASE(i->cls) == 1) + c->class |= Cfpint; + ngp--; + *c->reg = *gp++; + } else + c->class |= Cstk1; + break; + case Oargv: + vararg = 1; + break; + case Oparc: + case Oargc: + t = &typ[i->arg[0].val]; + typclass(c, t, 1, gp, fp); + if (c->nfp > 0) + if (c->nfp >= nfp || c->ngp >= ngp) + typclass(c, t, 0, gp, fp); + assert(c->nfp <= nfp); + if (c->ngp <= ngp) { + ngp -= c->ngp; + nfp -= c->nfp; + gp += c->ngp; + fp += c->nfp; + } else if (ngp > 0) { + assert(c->ngp == 2); + assert(c->class == 0); + c->class |= Cstk2; + c->nreg = 1; + ngp--; + gp++; + } else { + c->class |= Cstk1; + if (c->nreg > 1) + c->class |= Cstk2; + c->nreg = 0; + } + break; + case Opare: + case Oarge: + *c->reg = T5; + *c->cls = Kl; + envc = 1; + break; + } + } + return envc << 12 | (gp-gpreg) << 4 | (fp-fpreg) << 8; +} + +static void +stkblob(Ref r, Typ *t, Fn *fn, Insl **ilp) +{ + Insl *il; + int al; + uint64_t sz; + + il = alloc(sizeof *il); + al = t->align - 2; /* specific to NAlign == 3 */ + if (al < 0) + al = 0; + sz = (t->size + 7) & ~7; + il->i = (Ins){Oalloc+al, Kl, r, {getcon(sz, fn)}}; + il->link = *ilp; + *ilp = il; +} + +static void +selcall(Fn *fn, Ins *i0, Ins *i1, Insl **ilp) +{ + Ins *i; + Class *ca, *c, cr; + int j, k, cty; + uint64_t stk, off; + Ref r, r1, r2, tmp[2]; + + ca = alloc((i1-i0) * sizeof ca[0]); + cr.class = 0; + + if (!req(i1->arg[1], R)) + typclass(&cr, &typ[i1->arg[1].val], 1, gpreg, fpreg); + + cty = argsclass(i0, i1, ca, cr.class & Cptr); + stk = 0; + for (i=i0, c=ca; iop == Oargv) + continue; + if (c->class & Cptr) { + i->arg[0] = newtmp("abi", Kl, fn); + 
stkblob(i->arg[0], c->type, fn, ilp); + i->op = Oarg; + } + if (c->class & Cstk1) + stk += 8; + if (c->class & Cstk2) + stk += 8; + } + stk += stk & 15; + if (stk) + emit(Osalloc, Kl, R, getcon(-stk, fn), R); + + if (!req(i1->arg[1], R)) { + stkblob(i1->to, cr.type, fn, ilp); + cty |= (cr.nfp << 2) | cr.ngp; + if (cr.class & Cptr) + /* spill & rega expect calls to be + * followed by copies from regs, + * so we emit a dummy + */ + emit(Ocopy, Kw, R, TMP(A0), R); + else { + sttmps(tmp, cr.nreg, &cr, i1->to, fn); + for (j=0; jcls) == 0) { + emit(Ocopy, i1->cls, i1->to, TMP(A0), R); + cty |= 1; + } else { + emit(Ocopy, i1->cls, i1->to, TMP(FA0), R); + cty |= 1 << 2; + } + + emit(Ocall, 0, R, i1->arg[0], CALL(cty)); + + if (cr.class & Cptr) + /* struct return argument */ + emit(Ocopy, Kl, TMP(A0), i1->to, R); + + /* move arguments into registers */ + for (i=i0, c=ca; iop == Oargv || c->class & Cstk1) + continue; + if (i->op == Oargc) { + ldregs(c, i->arg[1], fn); + } else if (c->class & Cfpint) { + k = KWIDE(*c->cls) ? Kl : Kw; + r = newtmp("abi", k, fn); + emit(Ocopy, k, TMP(*c->reg), r, R); + *c->reg = r.val; + } else { + emit(Ocopy, *c->cls, TMP(*c->reg), i->arg[0], R); + } + } + + for (i=i0, c=ca; iclass & Cfpint) { + k = KWIDE(*c->cls) ? 
Kl : Kw; + emit(Ocast, k, TMP(*c->reg), i->arg[0], R); + } + if (c->class & Cptr) { + emit(Oblit1, 0, R, INT(c->type->size), R); + emit(Oblit0, 0, R, i->arg[1], i->arg[0]); + } + } + + if (!stk) + return; + + /* populate the stack */ + off = 0; + r = newtmp("abi", Kl, fn); + for (i=i0, c=ca; iop == Oargv || !(c->class & Cstk)) + continue; + if (i->op == Oarg) { + r1 = newtmp("abi", Kl, fn); + emit(Ostorew+i->cls, Kw, R, i->arg[0], r1); + if (i->cls == Kw) { + /* TODO: we only need this sign + * extension for l temps passed + * as w arguments + * (see rv64/isel.c:fixarg) + */ + curi->op = Ostorel; + curi->arg[0] = newtmp("abi", Kl, fn); + emit(Oextsw, Kl, curi->arg[0], i->arg[0], R); + } + emit(Oadd, Kl, r1, r, getcon(off, fn)); + off += 8; + } + if (i->op == Oargc) { + if (c->class & Cstk1) { + r1 = newtmp("abi", Kl, fn); + r2 = newtmp("abi", Kl, fn); + emit(Ostorel, 0, R, r2, r1); + emit(Oadd, Kl, r1, r, getcon(off, fn)); + emit(Oload, Kl, r2, i->arg[1], R); + off += 8; + } + if (c->class & Cstk2) { + r1 = newtmp("abi", Kl, fn); + r2 = newtmp("abi", Kl, fn); + emit(Ostorel, 0, R, r2, r1); + emit(Oadd, Kl, r1, r, getcon(off, fn)); + r1 = newtmp("abi", Kl, fn); + emit(Oload, Kl, r2, r1, R); + emit(Oadd, Kl, r1, i->arg[1], getcon(8, fn)); + off += 8; + } + } + } + emit(Osalloc, Kl, r, getcon(stk, fn), R); +} + +static Params +selpar(Fn *fn, Ins *i0, Ins *i1) +{ + Class *ca, *c, cr; + Insl *il; + Ins *i; + int j, k, s, cty, nt; + Ref r, tmp[17], *t; + + ca = alloc((i1-i0) * sizeof ca[0]); + cr.class = 0; + curi = &insb[NIns]; + + if (fn->retty >= 0) { + typclass(&cr, &typ[fn->retty], 1, gpreg, fpreg); + if (cr.class & Cptr) { + fn->retr = newtmp("abi", Kl, fn); + emit(Ocopy, Kl, fn->retr, TMP(A0), R); + } + } + + cty = argsclass(i0, i1, ca, cr.class & Cptr); + fn->reg = rv64_argregs(CALL(cty), 0); + + il = 0; + t = tmp; + for (i=i0, c=ca; iclass & Cfpint) { + r = i->to; + k = *c->cls; + *c->cls = KWIDE(k) ? 
Kl : Kw; + i->to = newtmp("abi", k, fn); + emit(Ocast, k, r, i->to, R); + } + if (i->op == Oparc) + if (!(c->class & Cptr)) + if (c->nreg != 0) { + nt = c->nreg; + if (c->class & Cstk2) { + c->cls[1] = Kl; + c->off[1] = 8; + assert(nt == 1); + nt = 2; + } + sttmps(t, nt, c, i->to, fn); + stkblob(i->to, c->type, fn, &il); + t += nt; + } + } + for (; il; il=il->link) + emiti(il->i); + + t = tmp; + s = 2 + 8*fn->vararg; + for (i=i0, c=ca; iop == Oparc && !(c->class & Cptr)) { + if (c->nreg == 0) { + fn->tmp[i->to.val].slot = -s; + s += (c->class & Cstk2) ? 2 : 1; + continue; + } + for (j=0; jnreg; j++) { + r = TMP(c->reg[j]); + emit(Ocopy, c->cls[j], *t++, r, R); + } + if (c->class & Cstk2) { + emit(Oload, Kl, *t, SLOT(-s), R); + t++, s++; + } + } else if (c->class & Cstk1) { + emit(Oload, *c->cls, i->to, SLOT(-s), R); + s++; + } else { + emit(Ocopy, *c->cls, i->to, TMP(*c->reg), R); + } + + return (Params){ + .stk = s, + .ngp = (cty >> 4) & 15, + .nfp = (cty >> 8) & 15, + }; +} + +static void +selvaarg(Fn *fn, Ins *i) +{ + Ref loc, newloc; + + loc = newtmp("abi", Kl, fn); + newloc = newtmp("abi", Kl, fn); + emit(Ostorel, Kw, R, newloc, i->arg[0]); + emit(Oadd, Kl, newloc, loc, getcon(8, fn)); + emit(Oload, i->cls, i->to, loc, R); + emit(Oload, Kl, loc, i->arg[0], R); +} + +static void +selvastart(Fn *fn, Params p, Ref ap) +{ + Ref rsave; + int s; + + rsave = newtmp("abi", Kl, fn); + emit(Ostorel, Kw, R, rsave, ap); + s = p.stk > 2 + 8 * fn->vararg ? 
p.stk : 2 + p.ngp; + emit(Oaddr, Kl, rsave, SLOT(-s), R); +} + +void +rv64_abi(Fn *fn) +{ + Blk *b; + Ins *i, *i0; + Insl *il; + int n0, n1, ioff; + Params p; + + for (b=fn->start; b; b=b->link) + b->visit = 0; + + /* lower parameters */ + for (b=fn->start, i=b->ins; i<&b->ins[b->nins]; i++) + if (!ispar(i->op)) + break; + p = selpar(fn, b->ins, i); + n0 = &insb[NIns] - curi; + ioff = i - b->ins; + n1 = b->nins - ioff; + vgrow(&b->ins, n0+n1); + icpy(b->ins+n0, b->ins+ioff, n1); + icpy(b->ins, curi, n0); + b->nins = n0+n1; + + /* lower calls, returns, and vararg instructions */ + il = 0; + b = fn->start; + do { + if (!(b = b->link)) + b = fn->start; /* do it last */ + if (b->visit) + continue; + curi = &insb[NIns]; + selret(b, fn); + for (i=&b->ins[b->nins]; i!=b->ins;) + switch ((--i)->op) { + default: + emiti(*i); + break; + case Ocall: + for (i0=i; i0>b->ins; i0--) + if (!isarg((i0-1)->op)) + break; + selcall(fn, i0, i, &il); + i = i0; + break; + case Ovastart: + selvastart(fn, p, i->arg[0]); + break; + case Ovaarg: + selvaarg(fn, i); + break; + case Oarg: + case Oargc: + die("unreachable"); + } + if (b == fn->start) + for (; il; il=il->link) + emiti(il->i); + idup(b, curi, &insb[NIns]-curi); + } while (b != fn->start); + + if (debug['A']) { + fprintf(stderr, "\n> After ABI lowering:\n"); + printfn(fn, stderr); + } +} diff --git a/src/qbe/rv64/all.h b/src/qbe/rv64/all.h new file mode 100644 index 00000000..a03973c3 --- /dev/null +++ b/src/qbe/rv64/all.h @@ -0,0 +1,52 @@ +#include "../all.h" + +typedef struct Rv64Op Rv64Op; + +enum Rv64Reg { + /* caller-save */ + T0 = RXX + 1, T1, T2, T3, T4, T5, + A0, A1, A2, A3, A4, A5, A6, A7, + + /* callee-save */ + S1, S2, S3, S4, S5, S6, S7, S8, S9, S10, S11, + + /* globally live */ + FP, SP, GP, TP, RA, + + /* FP caller-save */ + FT0, FT1, FT2, FT3, FT4, FT5, FT6, FT7, FT8, FT9, FT10, + FA0, FA1, FA2, FA3, FA4, FA5, FA6, FA7, + + /* FP callee-save */ + FS0, FS1, FS2, FS3, FS4, FS5, FS6, FS7, FS8, FS9, FS10, FS11, + + /* 
reserved (see rv64/emit.c) */ + T6, FT11, + + NFPR = FS11 - FT0 + 1, + NGPR = RA - T0 + 1, + NGPS = A7 - T0 + 1, + NFPS = FA7 - FT0 + 1, + NCLR = (S11 - S1 + 1) + (FS11 - FS0 + 1), +}; +MAKESURE(reg_not_tmp, FT11 < (int)Tmp0); + +struct Rv64Op { + char imm; +}; + +/* targ.c */ +extern int rv64_rsave[]; +extern int rv64_rclob[]; +extern Rv64Op rv64_op[]; + +/* abi.c */ +bits rv64_retregs(Ref, int[2]); +bits rv64_argregs(Ref, int[2]); +void rv64_abi(Fn *); + +/* isel.c */ +void rv64_isel(Fn *); + +/* emit.c */ +void rv64_emitfn(Fn *, FILE *); diff --git a/src/qbe/rv64/emit.c b/src/qbe/rv64/emit.c new file mode 100644 index 00000000..c1f29acd --- /dev/null +++ b/src/qbe/rv64/emit.c @@ -0,0 +1,569 @@ +#include "all.h" + +enum { + Ki = -1, /* matches Kw and Kl */ + Ka = -2, /* matches all classes */ +}; + +static struct { + short op; + short cls; + char *fmt; +} omap[] = { + { Oadd, Ki, "add%k %=, %0, %1" }, + { Oadd, Ka, "fadd.%k %=, %0, %1" }, + { Osub, Ki, "sub%k %=, %0, %1" }, + { Osub, Ka, "fsub.%k %=, %0, %1" }, + { Oneg, Ki, "neg%k %=, %0" }, + { Oneg, Ka, "fneg.%k %=, %0" }, + { Odiv, Ki, "div%k %=, %0, %1" }, + { Odiv, Ka, "fdiv.%k %=, %0, %1" }, + { Orem, Ki, "rem%k %=, %0, %1" }, + { Orem, Kl, "rem %=, %0, %1" }, + { Oudiv, Ki, "divu%k %=, %0, %1" }, + { Ourem, Ki, "remu%k %=, %0, %1" }, + { Omul, Ki, "mul%k %=, %0, %1" }, + { Omul, Ka, "fmul.%k %=, %0, %1" }, + { Oand, Ki, "and %=, %0, %1" }, + { Oor, Ki, "or %=, %0, %1" }, + { Oxor, Ki, "xor %=, %0, %1" }, + { Osar, Ki, "sra%k %=, %0, %1" }, + { Oshr, Ki, "srl%k %=, %0, %1" }, + { Oshl, Ki, "sll%k %=, %0, %1" }, + { Ocsltl, Ki, "slt %=, %0, %1" }, + { Ocultl, Ki, "sltu %=, %0, %1" }, + { Oceqs, Ki, "feq.s %=, %0, %1" }, + { Ocges, Ki, "fge.s %=, %0, %1" }, + { Ocgts, Ki, "fgt.s %=, %0, %1" }, + { Ocles, Ki, "fle.s %=, %0, %1" }, + { Oclts, Ki, "flt.s %=, %0, %1" }, + { Oceqd, Ki, "feq.d %=, %0, %1" }, + { Ocged, Ki, "fge.d %=, %0, %1" }, + { Ocgtd, Ki, "fgt.d %=, %0, %1" }, + { Ocled, Ki, "fle.d %=, %0, %1" 
}, + { Ocltd, Ki, "flt.d %=, %0, %1" }, + { Ostoreb, Kw, "sb %0, %M1" }, + { Ostoreh, Kw, "sh %0, %M1" }, + { Ostorew, Kw, "sw %0, %M1" }, + { Ostorel, Ki, "sd %0, %M1" }, + { Ostores, Kw, "fsw %0, %M1" }, + { Ostored, Kw, "fsd %0, %M1" }, + { Oloadsb, Ki, "lb %=, %M0" }, + { Oloadub, Ki, "lbu %=, %M0" }, + { Oloadsh, Ki, "lh %=, %M0" }, + { Oloaduh, Ki, "lhu %=, %M0" }, + { Oloadsw, Ki, "lw %=, %M0" }, + /* riscv64 always sign-extends 32-bit + * values stored in 64-bit registers + */ + { Oloaduw, Kw, "lw %=, %M0" }, + { Oloaduw, Kl, "lwu %=, %M0" }, + { Oload, Kw, "lw %=, %M0" }, + { Oload, Kl, "ld %=, %M0" }, + { Oload, Ks, "flw %=, %M0" }, + { Oload, Kd, "fld %=, %M0" }, + { Oextsb, Ki, "sext.b %=, %0" }, + { Oextub, Ki, "zext.b %=, %0" }, + { Oextsh, Ki, "sext.h %=, %0" }, + { Oextuh, Ki, "zext.h %=, %0" }, + { Oextsw, Kl, "sext.w %=, %0" }, + { Oextuw, Kl, "zext.w %=, %0" }, + { Otruncd, Ks, "fcvt.s.d %=, %0" }, + { Oexts, Kd, "fcvt.d.s %=, %0" }, + { Ostosi, Kw, "fcvt.w.s %=, %0, rtz" }, + { Ostosi, Kl, "fcvt.l.s %=, %0, rtz" }, + { Ostoui, Kw, "fcvt.wu.s %=, %0, rtz" }, + { Ostoui, Kl, "fcvt.lu.s %=, %0, rtz" }, + { Odtosi, Kw, "fcvt.w.d %=, %0, rtz" }, + { Odtosi, Kl, "fcvt.l.d %=, %0, rtz" }, + { Odtoui, Kw, "fcvt.wu.d %=, %0, rtz" }, + { Odtoui, Kl, "fcvt.lu.d %=, %0, rtz" }, + { Oswtof, Ka, "fcvt.%k.w %=, %0" }, + { Ouwtof, Ka, "fcvt.%k.wu %=, %0" }, + { Osltof, Ka, "fcvt.%k.l %=, %0" }, + { Oultof, Ka, "fcvt.%k.lu %=, %0" }, + { Ocast, Kw, "fmv.x.w %=, %0" }, + { Ocast, Kl, "fmv.x.d %=, %0" }, + { Ocast, Ks, "fmv.w.x %=, %0" }, + { Ocast, Kd, "fmv.d.x %=, %0" }, + { Ocopy, Ki, "mv %=, %0" }, + { Ocopy, Ka, "fmv.%k %=, %0" }, + { Oswap, Ki, "mv %?, %0\n\tmv %0, %1\n\tmv %1, %?" }, + { Oswap, Ka, "fmv.%k %?, %0\n\tfmv.%k %0, %1\n\tfmv.%k %1, %?" 
}, + { Oreqz, Ki, "seqz %=, %0" }, + { Ornez, Ki, "snez %=, %0" }, + { Ocall, Kw, "jalr %0" }, + { NOp, 0, 0 } +}; + +static char *rname[] = { + [FP] = "fp", + [SP] = "sp", + [GP] = "gp", + [TP] = "tp", + [RA] = "ra", + [T0] = "t0", "t1", "t2", "t3", "t4", "t5", + [A0] = "a0", "a1", "a2", "a3", "a4", "a5", "a6", "a7", + [S1] = "s1", "s2", "s3", "s4", "s5", "s6", "s7", "s8", + "s9", "s10", "s11", + [FT0] = "ft0", "ft1", "ft2", "ft3", "ft4", "ft5", "ft6", "ft7", + "ft8", "ft9", "ft10", + [FA0] = "fa0", "fa1", "fa2", "fa3", "fa4", "fa5", "fa6", "fa7", + [FS0] = "fs0", "fs1", "fs2", "fs3", "fs4", "fs5", "fs6", "fs7", + "fs8", "fs9", "fs10", "fs11", + [T6] = "t6", + [FT11] = "ft11", +}; + +static int64_t +slot(Ref r, Fn *fn) +{ + int s; + + s = rsval(r); + assert(s <= fn->slot); + if (s < 0) + return 8 * -s; + else + return -4 * (fn->slot - s); +} + +static void +emitaddr(Con *c, FILE *f) +{ + assert(c->sym.type == SGlo); + fputs(str(c->sym.id), f); + if (c->bits.i) + fprintf(f, "+%"PRIi64, c->bits.i); +} + +static void +emitf(char *s, Ins *i, Fn *fn, FILE *f) +{ + static char clschr[] = {'w', 'l', 's', 'd'}; + Ref r; + int k, c; + Con *pc; + int64_t offset; + + fputc('\t', f); + for (;;) { + k = i->cls; + while ((c = *s++) != '%') + if (!c) { + fputc('\n', f); + return; + } else + fputc(c, f); + switch ((c = *s++)) { + default: + die("invalid escape"); + case '?': + if (KBASE(k) == 0) + fputs("t6", f); + else + fputs("ft11", f); + break; + case 'k': + if (i->cls != Kl) + fputc(clschr[i->cls], f); + break; + case '=': + case '0': + r = c == '=' ? 
i->to : i->arg[0]; + assert(isreg(r)); + fputs(rname[r.val], f); + break; + case '1': + r = i->arg[1]; + switch (rtype(r)) { + default: + die("invalid second argument"); + case RTmp: + assert(isreg(r)); + fputs(rname[r.val], f); + break; + case RCon: + pc = &fn->con[r.val]; + assert(pc->type == CBits); + assert(pc->bits.i >= -2048 && pc->bits.i < 2048); + fprintf(f, "%d", (int)pc->bits.i); + break; + } + break; + case 'M': + c = *s++; + assert(c == '0' || c == '1'); + r = i->arg[c - '0']; + switch (rtype(r)) { + default: + die("invalid address argument"); + case RTmp: + fprintf(f, "0(%s)", rname[r.val]); + break; + case RCon: + pc = &fn->con[r.val]; + assert(pc->type == CAddr); + emitaddr(pc, f); + if (isstore(i->op) + || (isload(i->op) && KBASE(i->cls) == 1)) { + /* store (and float load) + * pseudo-instructions need a + * temporary register in which to + * load the address + */ + fprintf(f, ", t6"); + } + break; + case RSlot: + offset = slot(r, fn); + assert(offset >= -2048 && offset <= 2047); + fprintf(f, "%d(fp)", (int)offset); + break; + } + break; + } + } +} + +static void +loadaddr(Con *c, char *rn, FILE *f) +{ + char off[32]; + + if (c->sym.type == SThr) { + if (c->bits.i) + sprintf(off, "+%"PRIi64, c->bits.i); + else + off[0] = 0; + fprintf(f, "\tlui %s, %%tprel_hi(%s)%s\n", + rn, str(c->sym.id), off); + fprintf(f, "\tadd %s, %s, tp, %%tprel_add(%s)%s\n", + rn, rn, str(c->sym.id), off); + fprintf(f, "\taddi %s, %s, %%tprel_lo(%s)%s\n", + rn, rn, str(c->sym.id), off); + } else { + fprintf(f, "\tla %s, ", rn); + emitaddr(c, f); + fputc('\n', f); + } +} + +static void +loadcon(Con *c, int r, int k, FILE *f) +{ + char *rn; + int64_t n; + + rn = rname[r]; + switch (c->type) { + case CAddr: + loadaddr(c, rn, f); + break; + case CBits: + n = c->bits.i; + if (!KWIDE(k)) + n = (int32_t)n; + fprintf(f, "\tli %s, %"PRIi64"\n", rn, n); + break; + default: + die("invalid constant"); + } +} + +static void +fixmem(Ref *pr, Fn *fn, FILE *f) +{ + Ref r; + int64_t s; + Con 
*c; + + r = *pr; + if (rtype(r) == RCon) { + c = &fn->con[r.val]; + if (c->type == CAddr) + if (c->sym.type == SThr) { + loadcon(c, T6, Kl, f); + *pr = TMP(T6); + } + } + if (rtype(r) == RSlot) { + s = slot(r, fn); + if (s < -2048 || s > 2047) { + fprintf(f, "\tli t6, %"PRId64"\n", s); + fprintf(f, "\tadd t6, fp, t6\n"); + *pr = TMP(T6); + } + } +} + +static void +emitins(Ins *i, Fn *fn, FILE *f) +{ + int o; + char *rn; + int64_t s; + Con *con; + + switch (i->op) { + default: + if (isload(i->op)) + fixmem(&i->arg[0], fn, f); + else if (isstore(i->op)) + fixmem(&i->arg[1], fn, f); + Table: + /* most instructions are just pulled out of + * the table omap[], some special cases are + * detailed below */ + for (o=0;; o++) { + /* this linear search should really be a binary + * search */ + if (omap[o].op == NOp) + die("no match for %s(%c)", + optab[i->op].name, "wlsd"[i->cls]); + if (omap[o].op == i->op) + if (omap[o].cls == i->cls || omap[o].cls == Ka + || (omap[o].cls == Ki && KBASE(i->cls) == 0)) + break; + } + emitf(omap[o].fmt, i, fn, f); + break; + case Ocopy: + if (req(i->to, i->arg[0])) + break; + if (rtype(i->to) == RSlot) { + switch (rtype(i->arg[0])) { + case RSlot: + case RCon: + die("unimplemented"); + break; + default: + assert(isreg(i->arg[0])); + i->arg[1] = i->to; + i->to = R; + switch (i->cls) { + case Kw: i->op = Ostorew; break; + case Kl: i->op = Ostorel; break; + case Ks: i->op = Ostores; break; + case Kd: i->op = Ostored; break; + } + fixmem(&i->arg[1], fn, f); + goto Table; + } + break; + } + assert(isreg(i->to)); + switch (rtype(i->arg[0])) { + case RCon: + loadcon(&fn->con[i->arg[0].val], i->to.val, i->cls, f); + break; + case RSlot: + i->op = Oload; + fixmem(&i->arg[0], fn, f); + goto Table; + default: + assert(isreg(i->arg[0])); + goto Table; + } + break; + case Onop: + break; + case Oaddr: + assert(rtype(i->arg[0]) == RSlot); + rn = rname[i->to.val]; + s = slot(i->arg[0], fn); + if (-s < 2048) { + fprintf(f, "\tadd %s, fp, %"PRId64"\n", rn, 
s); + } else { + fprintf(f, + "\tli %s, %"PRId64"\n" + "\tadd %s, fp, %s\n", + rn, s, rn, rn + ); + } + break; + case Ocall: + switch (rtype(i->arg[0])) { + case RCon: + con = &fn->con[i->arg[0].val]; + if (con->type != CAddr + || con->sym.type != SGlo + || con->bits.i) + goto Invalid; + fprintf(f, "\tcall %s\n", str(con->sym.id)); + break; + case RTmp: + emitf("jalr %0", i, fn, f); + break; + default: + Invalid: + die("invalid call argument"); + } + break; + case Osalloc: + emitf("sub sp, sp, %0", i, fn, f); + if (!req(i->to, R)) + emitf("mv %=, sp", i, fn, f); + break; + case Odbgloc: + emitdbgloc(i->arg[0].val, i->arg[1].val, f); + break; + } +} + +/* + + Stack-frame layout: + + +=============+ + | varargs | + | save area | + +-------------+ + | saved ra | + | saved fp | + +-------------+ <- fp + | ... | + | spill slots | + | ... | + +-------------+ + | ... | + | locals | + | ... | + +-------------+ + | padding | + +-------------+ + | callee-save | + | registers | + +=============+ + +*/ + +void +rv64_emitfn(Fn *fn, FILE *f) +{ + static int id0; + int lbl, neg, off, frame, *pr, r; + Blk *b, *s; + Ins *i, ii; + + emitfnlnk(fn->name, &fn->lnk, f); + + if (fn->vararg) { + /* TODO: only need space for registers + * unused by named arguments + */ + fprintf(f, "\tadd sp, sp, -64\n"); + for (r=A0; r<=A7; r++) + fprintf(f, + "\tsd %s, %d(sp)\n", + rname[r], 8 * (r - A0) + ); + } + fprintf(f, "\tsd fp, -16(sp)\n"); + fprintf(f, "\tsd ra, -8(sp)\n"); + fprintf(f, "\tadd fp, sp, -16\n"); + + frame = (16 + 4 * fn->slot + 15) & ~15; + for (pr=rv64_rclob; *pr>=0; pr++) { + if (fn->reg & BIT(*pr)) + frame += 8; + } + frame = (frame + 15) & ~15; + + if (frame <= 2048) + fprintf(f, + "\tadd sp, sp, -%d\n", + frame + ); + else + fprintf(f, + "\tli t6, %d\n" + "\tsub sp, sp, t6\n", + frame + ); + for (pr=rv64_rclob, off=0; *pr>=0; pr++) { + if (fn->reg & BIT(*pr)) { + fprintf(f, + "\t%s %s, %d(sp)\n", + *pr < FT0 ? 
"sd" : "fsd", + rname[*pr], off + ); + off += 8; + } + } + + for (lbl=0, b=fn->start; b; b=b->link) { + if (lbl || b->npred > 1) + fprintf(f, ".L%d:\n", id0+b->id); + for (i=b->ins; i!=&b->ins[b->nins]; i++) + emitins(i, fn, f); + lbl = 1; + switch (b->jmp.type) { + case Jhlt: + fprintf(f, "\tebreak\n"); + break; + case Jret0: + if (fn->dynalloc) { + if (frame - 16 <= 2048) + fprintf(f, + "\tadd sp, fp, -%d\n", + frame - 16 + ); + else + fprintf(f, + "\tli t6, %d\n" + "\tsub sp, fp, t6\n", + frame - 16 + ); + } + for (pr=rv64_rclob, off=0; *pr>=0; pr++) { + if (fn->reg & BIT(*pr)) { + fprintf(f, + "\t%s %s, %d(sp)\n", + *pr < FT0 ? "ld" : "fld", + rname[*pr], off + ); + off += 8; + } + } + fprintf(f, + "\tadd sp, fp, %d\n" + "\tld ra, 8(fp)\n" + "\tld fp, 0(fp)\n" + "\tret\n", + 16 + fn->vararg * 64 + ); + break; + case Jjmp: + Jmp: + if (b->s1 != b->link) + fprintf(f, "\tj .L%d\n", id0+b->s1->id); + else + lbl = 0; + break; + case Jjnz: + neg = 0; + if (b->link == b->s2) { + s = b->s1; + b->s1 = b->s2; + b->s2 = s; + neg = 1; + } + if (rtype(b->jmp.arg) == RSlot) { + ii.arg[0] = b->jmp.arg; + emitf("lw t6, %M0", &ii, fn, f); + b->jmp.arg = TMP(T6); + } + assert(isreg(b->jmp.arg)); + fprintf(f, + "\tb%sz %s, .L%d\n", + neg ? "ne" : "eq", + rname[b->jmp.arg.val], + id0+b->s2->id + ); + goto Jmp; + } + } + id0 += fn->nblk; + elf_emitfnfin(fn->name, f); +} diff --git a/src/qbe/rv64/isel.c b/src/qbe/rv64/isel.c new file mode 100644 index 00000000..ca623599 --- /dev/null +++ b/src/qbe/rv64/isel.c @@ -0,0 +1,255 @@ +#include "all.h" + +static int +memarg(Ref *r, int op, Ins *i) +{ + if (isload(op) || op == Ocall) + return r == &i->arg[0]; + if (isstore(op)) + return r == &i->arg[1]; + return 0; +} + +static int +immarg(Ref *r, int op, Ins *i) +{ + return rv64_op[op].imm && r == &i->arg[1]; +} + +static void +fixarg(Ref *r, int k, Ins *i, Fn *fn) +{ + char buf[32]; + Ref r0, r1; + int s, n, op; + Con *c; + + r0 = r1 = *r; + op = i ? 
i->op : Ocopy; + switch (rtype(r0)) { + case RCon: + c = &fn->con[r0.val]; + if (c->type == CAddr && memarg(r, op, i)) + break; + if (KBASE(k) == 0) + if (c->type == CBits && immarg(r, op, i)) + if (-2048 <= c->bits.i && c->bits.i < 2048) + break; + r1 = newtmp("isel", k, fn); + if (KBASE(k) == 1) { + /* load floating points from memory + * slots, they can't be used as + * immediates + */ + assert(c->type == CBits); + n = stashbits(c->bits.i, KWIDE(k) ? 8 : 4); + vgrow(&fn->con, ++fn->ncon); + c = &fn->con[fn->ncon-1]; + sprintf(buf, "\"%sfp%d\"", T.asloc, n); + *c = (Con){.type = CAddr}; + c->sym.id = intern(buf); + emit(Oload, k, r1, CON(c-fn->con), R); + break; + } + emit(Ocopy, k, r1, r0, R); + break; + case RTmp: + if (isreg(r0)) + break; + s = fn->tmp[r0.val].slot; + if (s != -1) { + /* aggregate passed by value on + * stack, or fast local address, + * replace with slot if we can + */ + if (memarg(r, op, i)) { + r1 = SLOT(s); + break; + } + r1 = newtmp("isel", k, fn); + emit(Oaddr, k, r1, SLOT(s), R); + break; + } + if (k == Kw && fn->tmp[r0.val].cls == Kl) { + /* TODO: this sign extension isn't needed + * for 32-bit arithmetic instructions + */ + r1 = newtmp("isel", k, fn); + emit(Oextsw, Kl, r1, r0, R); + } else { + assert(k == fn->tmp[r0.val].cls); + } + break; + } + *r = r1; +} + +static void +negate(Ref *pr, Fn *fn) +{ + Ref r; + + r = newtmp("isel", Kw, fn); + emit(Oxor, Kw, *pr, r, getcon(1, fn)); + *pr = r; +} + +static void +selcmp(Ins i, int k, int op, Fn *fn) +{ + Ins *icmp; + Ref r, r0, r1; + int sign, swap, neg; + + switch (op) { + case Cieq: + r = newtmp("isel", k, fn); + emit(Oreqz, i.cls, i.to, r, R); + emit(Oxor, k, r, i.arg[0], i.arg[1]); + icmp = curi; + fixarg(&icmp->arg[0], k, icmp, fn); + fixarg(&icmp->arg[1], k, icmp, fn); + return; + case Cine: + r = newtmp("isel", k, fn); + emit(Ornez, i.cls, i.to, r, R); + emit(Oxor, k, r, i.arg[0], i.arg[1]); + icmp = curi; + fixarg(&icmp->arg[0], k, icmp, fn); + fixarg(&icmp->arg[1], k, icmp, fn); 
+ return; + case Cisge: sign = 1, swap = 0, neg = 1; break; + case Cisgt: sign = 1, swap = 1, neg = 0; break; + case Cisle: sign = 1, swap = 1, neg = 1; break; + case Cislt: sign = 1, swap = 0, neg = 0; break; + case Ciuge: sign = 0, swap = 0, neg = 1; break; + case Ciugt: sign = 0, swap = 1, neg = 0; break; + case Ciule: sign = 0, swap = 1, neg = 1; break; + case Ciult: sign = 0, swap = 0, neg = 0; break; + case NCmpI+Cfeq: + case NCmpI+Cfge: + case NCmpI+Cfgt: + case NCmpI+Cfle: + case NCmpI+Cflt: + swap = 0, neg = 0; + break; + case NCmpI+Cfuo: + negate(&i.to, fn); + /* fall through */ + case NCmpI+Cfo: + r0 = newtmp("isel", i.cls, fn); + r1 = newtmp("isel", i.cls, fn); + emit(Oand, i.cls, i.to, r0, r1); + op = KWIDE(k) ? Oceqd : Oceqs; + emit(op, i.cls, r0, i.arg[0], i.arg[0]); + icmp = curi; + fixarg(&icmp->arg[0], k, icmp, fn); + fixarg(&icmp->arg[1], k, icmp, fn); + emit(op, i.cls, r1, i.arg[1], i.arg[1]); + icmp = curi; + fixarg(&icmp->arg[0], k, icmp, fn); + fixarg(&icmp->arg[1], k, icmp, fn); + return; + case NCmpI+Cfne: + swap = 0, neg = 1; + i.op = KWIDE(k) ? Oceqd : Oceqs; + break; + default: + assert(0 && "unknown comparison"); + } + if (op < NCmpI) + i.op = sign ? 
Ocsltl : Ocultl; + if (swap) { + r = i.arg[0]; + i.arg[0] = i.arg[1]; + i.arg[1] = r; + } + if (neg) + negate(&i.to, fn); + emiti(i); + icmp = curi; + fixarg(&icmp->arg[0], k, icmp, fn); + fixarg(&icmp->arg[1], k, icmp, fn); +} + +static void +sel(Ins i, Fn *fn) +{ + Ins *i0; + int ck, cc; + + if (INRANGE(i.op, Oalloc, Oalloc1)) { + i0 = curi - 1; + salloc(i.to, i.arg[0], fn); + fixarg(&i0->arg[0], Kl, i0, fn); + return; + } + if (iscmp(i.op, &ck, &cc)) { + selcmp(i, ck, cc, fn); + return; + } + if (i.op != Onop) { + emiti(i); + i0 = curi; /* fixarg() can change curi */ + fixarg(&i0->arg[0], argcls(&i, 0), i0, fn); + fixarg(&i0->arg[1], argcls(&i, 1), i0, fn); + } +} + +static void +seljmp(Blk *b, Fn *fn) +{ + /* TODO: replace cmp+jnz with beq/bne/blt[u]/bge[u] */ + if (b->jmp.type == Jjnz) + fixarg(&b->jmp.arg, Kw, 0, fn); +} + +void +rv64_isel(Fn *fn) +{ + Blk *b, **sb; + Ins *i; + Phi *p; + uint n; + int al; + int64_t sz; + + /* assign slots to fast allocs */ + b = fn->start; + /* specific to NAlign == 3 */ /* or change n=4 and sz /= 4 below */ + for (al=Oalloc, n=4; al<=Oalloc1; al++, n*=2) + for (i=b->ins; i<&b->ins[b->nins]; i++) + if (i->op == al) { + if (rtype(i->arg[0]) != RCon) + break; + sz = fn->con[i->arg[0].val].bits.i; + if (sz < 0 || sz >= INT_MAX-15) + err("invalid alloc size %"PRId64, sz); + sz = (sz + n-1) & -n; + sz /= 4; + if (sz > INT_MAX - fn->slot) + die("alloc too large"); + fn->tmp[i->to.val].slot = fn->slot; + fn->slot += sz; + *i = (Ins){.op = Onop}; + } + + for (b=fn->start; b; b=b->link) { + curi = &insb[NIns]; + for (sb=(Blk*[3]){b->s1, b->s2, 0}; *sb; sb++) + for (p=(*sb)->phi; p; p=p->link) { + for (n=0; p->blk[n] != b; n++) + assert(n+1 < p->narg); + fixarg(&p->arg[n], p->cls, 0, fn); + } + seljmp(b, fn); + for (i=&b->ins[b->nins]; i!=b->ins;) + sel(*--i, fn); + idup(b, curi, &insb[NIns]-curi); + } + + if (debug['I']) { + fprintf(stderr, "\n> After instruction selection:\n"); + printfn(fn, stderr); + } +} diff --git 
a/src/qbe/rv64/targ.c b/src/qbe/rv64/targ.c new file mode 100644 index 00000000..fc6632cd --- /dev/null +++ b/src/qbe/rv64/targ.c @@ -0,0 +1,57 @@ +#include "all.h" + +Rv64Op rv64_op[NOp] = { +#define O(op, t, x) [O##op] = +#define V(imm) { imm }, +#include "../ops.h" +}; + +int rv64_rsave[] = { + T0, T1, T2, T3, T4, T5, + A0, A1, A2, A3, A4, A5, A6, A7, + FA0, FA1, FA2, FA3, FA4, FA5, FA6, FA7, + FT0, FT1, FT2, FT3, FT4, FT5, FT6, FT7, + FT8, FT9, FT10, + -1 +}; +int rv64_rclob[] = { + S1, S2, S3, S4, S5, S6, S7, + S8, S9, S10, S11, + FS0, FS1, FS2, FS3, FS4, FS5, FS6, FS7, + FS8, FS9, FS10, FS11, + -1 +}; + +#define RGLOB (BIT(FP) | BIT(SP) | BIT(GP) | BIT(TP) | BIT(RA)) + +static int +rv64_memargs(int op) +{ + (void)op; + return 0; +} + +Target T_rv64 = { + .name = "rv64", + .gpr0 = T0, + .ngpr = NGPR, + .fpr0 = FT0, + .nfpr = NFPR, + .rglob = RGLOB, + .nrglob = 5, + .rsave = rv64_rsave, + .nrsave = {NGPS, NFPS}, + .retregs = rv64_retregs, + .argregs = rv64_argregs, + .memargs = rv64_memargs, + .abi0 = elimsb, + .abi1 = rv64_abi, + .isel = rv64_isel, + .emitfn = rv64_emitfn, + .emitfin = elf_emitfin, + .asloc = ".L", + .cansel = 0, +}; + +MAKESURE(rsave_size_ok, sizeof rv64_rsave == (NGPS+NFPS+1) * sizeof(int)); +MAKESURE(rclob_size_ok, sizeof rv64_rclob == (NCLR+1) * sizeof(int)); diff --git a/src/qbe/simpl.c b/src/qbe/simpl.c new file mode 100644 index 00000000..3774f29e --- /dev/null +++ b/src/qbe/simpl.c @@ -0,0 +1,124 @@ +#include "all.h" + +static void +blit(Ref sd[2], int sz, Fn *fn) +{ + struct { int st, ld, cls, size; } *p, tbl[] = { + { Ostorel, Oload, Kl, 8 }, + { Ostorew, Oload, Kw, 4 }, + { Ostoreh, Oloaduh, Kw, 2 }, + { Ostoreb, Oloadub, Kw, 1 } + }; + Ref r, r1, ro; + int off, fwd, n; + + fwd = sz >= 0; + sz = abs(sz); + off = fwd ? sz : 0; + for (p=tbl; sz; p++) + for (n=p->size; sz>=n; sz-=n) { + off -= fwd ? 
n : 0; + r = newtmp("blt", Kl, fn); + r1 = newtmp("blt", Kl, fn); + ro = getcon(off, fn); + emit(p->st, 0, R, r, r1); + emit(Oadd, Kl, r1, sd[1], ro); + r1 = newtmp("blt", Kl, fn); + emit(p->ld, p->cls, r, r1, R); + emit(Oadd, Kl, r1, sd[0], ro); + off += fwd ? 0 : n; + } +} + +static int +ulog2_tab64[64] = { + 63, 0, 1, 41, 37, 2, 16, 42, + 38, 29, 32, 3, 12, 17, 43, 55, + 39, 35, 30, 53, 33, 21, 4, 23, + 13, 9, 18, 6, 25, 44, 48, 56, + 62, 40, 36, 15, 28, 31, 11, 54, + 34, 52, 20, 22, 8, 5, 24, 47, + 61, 14, 27, 10, 51, 19, 7, 46, + 60, 26, 50, 45, 59, 49, 58, 57, +}; + +static int +ulog2(uint64_t pow2) +{ + return ulog2_tab64[(pow2 * 0x5b31ab928877a7e) >> 58]; +} + +static int +ispow2(uint64_t v) +{ + return v && (v & (v - 1)) == 0; +} + +static void +ins(Ins **pi, int *new, Blk *b, Fn *fn) +{ + ulong ni; + Con *c; + Ins *i; + Ref r; + int n; + + i = *pi; + /* simplify more instructions here; + * copy 0 into xor, bit rotations, + * etc. */ + switch (i->op) { + case Oblit1: + assert(i > b->ins); + assert((i-1)->op == Oblit0); + if (!*new) { + curi = &insb[NIns]; + ni = &b->ins[b->nins] - (i+1); + curi -= ni; + icpy(curi, i+1, ni); + *new = 1; + } + blit((i-1)->arg, rsval(i->arg[0]), fn); + *pi = i-1; + return; + case Oudiv: + case Ourem: + r = i->arg[1]; + if (KBASE(i->cls) == 0) + if (rtype(r) == RCon) { + c = &fn->con[r.val]; + if (c->type == CBits) + if (ispow2(c->bits.i)) { + n = ulog2(c->bits.i); + if (i->op == Ourem) { + i->op = Oand; + i->arg[1] = getcon((1ull<op = Oshr; + i->arg[1] = getcon(n, fn); + } + } + } + break; + } + if (*new) + emiti(*i); +} + +void +simpl(Fn *fn) +{ + Blk *b; + Ins *i; + int new; + + for (b=fn->start; b; b=b->link) { + new = 0; + for (i=&b->ins[b->nins]; i!=b->ins;) { + --i; + ins(&i, &new, b, fn); + } + if (new) + idup(b, curi, &insb[NIns]-curi); + } +} diff --git a/src/qbe/spill.c b/src/qbe/spill.c new file mode 100644 index 00000000..4fb7a292 --- /dev/null +++ b/src/qbe/spill.c @@ -0,0 +1,531 @@ +#include "all.h" + +static 
void +aggreg(Blk *hd, Blk *b) +{ + int k; + + /* aggregate looping information at + * loop headers */ + bsunion(hd->gen, b->gen); + for (k=0; k<2; k++) + if (b->nlive[k] > hd->nlive[k]) + hd->nlive[k] = b->nlive[k]; +} + +static void +tmpuse(Ref r, int use, int loop, Fn *fn) +{ + Mem *m; + Tmp *t; + + if (rtype(r) == RMem) { + m = &fn->mem[r.val]; + tmpuse(m->base, 1, loop, fn); + tmpuse(m->index, 1, loop, fn); + } + else if (rtype(r) == RTmp && r.val >= Tmp0) { + t = &fn->tmp[r.val]; + t->nuse += use; + t->ndef += !use; + t->cost += loop; + } +} + +/* evaluate spill costs of temporaries, + * this also fills usage information + * requires rpo, preds + */ +void +fillcost(Fn *fn) +{ + int n; + uint a; + Blk *b; + Ins *i; + Tmp *t; + Phi *p; + + loopiter(fn, aggreg); + if (debug['S']) { + fprintf(stderr, "\n> Loop information:\n"); + for (b=fn->start; b; b=b->link) { + for (a=0; anpred; ++a) + if (b->id <= b->pred[a]->id) + break; + if (a != b->npred) { + fprintf(stderr, "\t%-10s", b->name); + fprintf(stderr, " (% 3d ", b->nlive[0]); + fprintf(stderr, "% 3d) ", b->nlive[1]); + dumpts(b->gen, fn->tmp, stderr); + } + } + } + for (t=fn->tmp; t-fn->tmp < fn->ntmp; t++) { + t->cost = t-fn->tmp < Tmp0 ? 
UINT_MAX : 0; + t->nuse = 0; + t->ndef = 0; + } + for (b=fn->start; b; b=b->link) { + for (p=b->phi; p; p=p->link) { + t = &fn->tmp[p->to.val]; + tmpuse(p->to, 0, 0, fn); + for (a=0; anarg; a++) { + n = p->blk[a]->loop; + t->cost += n; + tmpuse(p->arg[a], 1, n, fn); + } + } + n = b->loop; + for (i=b->ins; i<&b->ins[b->nins]; i++) { + tmpuse(i->to, 0, n, fn); + tmpuse(i->arg[0], 1, n, fn); + tmpuse(i->arg[1], 1, n, fn); + } + tmpuse(b->jmp.arg, 1, n, fn); + } + if (debug['S']) { + fprintf(stderr, "\n> Spill costs:\n"); + for (n=Tmp0; nntmp; n++) + fprintf(stderr, "\t%-10s %d\n", + fn->tmp[n].name, + fn->tmp[n].cost); + fprintf(stderr, "\n"); + } +} + +static BSet *fst; /* temps to prioritize in registers (for tcmp1) */ +static Tmp *tmp; /* current temporaries (for tcmpX) */ +static int ntmp; /* current # of temps (for limit) */ +static int locs; /* stack size used by locals */ +static int slot4; /* next slot of 4 bytes */ +static int slot8; /* ditto, 8 bytes */ +static BSet mask[2][1]; /* class masks */ + +static int +tcmp0(const void *pa, const void *pb) +{ + uint ca, cb; + + ca = tmp[*(int *)pa].cost; + cb = tmp[*(int *)pb].cost; + return (cb < ca) ? -1 : (cb > ca); +} + +static int +tcmp1(const void *pa, const void *pb) +{ + int c; + + c = bshas(fst, *(int *)pb) - bshas(fst, *(int *)pa); + return c ? 
c : tcmp0(pa, pb); +} + +static Ref +slot(int t) +{ + int s; + + assert(t >= Tmp0 && "cannot spill register"); + s = tmp[t].slot; + if (s == -1) { + /* specific to NAlign == 3 */ + /* nice logic to pack stack slots + * on demand, there can be only + * one hole and slot4 points to it + * + * invariant: slot4 <= slot8 + */ + if (KWIDE(tmp[t].cls)) { + s = slot8; + if (slot4 == slot8) + slot4 += 2; + slot8 += 2; + } else { + s = slot4; + if (slot4 == slot8) { + slot8 += 2; + slot4 += 1; + } else + slot4 = slot8; + } + s += locs; + tmp[t].slot = s; + } + return SLOT(s); +} + +/* restricts b to hold at most k + * temporaries, preferring those + * present in f (if given), then + * those with the largest spill + * cost + */ +static void +limit(BSet *b, int k, BSet *f) +{ + static int *tarr, maxt; + int i, t, nt; + + nt = bscount(b); + if (nt <= k) + return; + if (nt > maxt) { + free(tarr); + tarr = emalloc(nt * sizeof tarr[0]); + maxt = nt; + } + for (i=0, t=0; bsiter(b, &t); t++) { + bsclr(b, t); + tarr[i++] = t; + } + if (nt > 1) { + if (!f) + qsort(tarr, nt, sizeof tarr[0], tcmp0); + else { + fst = f; + qsort(tarr, nt, sizeof tarr[0], tcmp1); + } + } + for (i=0; iop == Ocopy && isreg(i->arg[0]); +} + +static Ins * +dopm(Blk *b, Ins *i, BSet *v) +{ + int n, t; + BSet u[1]; + Ins *i1; + bits r; + + bsinit(u, ntmp); /* todo, free those */ + /* consecutive copies from + * registers need to be handled + * as one large instruction + * + * fixme: there is an assumption + * that calls are always followed + * by copy instructions here, this + * might not be true if previous + * passes change + */ + i1 = ++i; + do { + i--; + t = i->to.val; + if (!req(i->to, R)) + if (bshas(v, t)) { + bsclr(v, t); + store(i->to, tmp[t].slot); + } + bsset(v, i->arg[0].val); + } while (i != b->ins && regcpy(i-1)); + bscopy(u, v); + if (i != b->ins && (i-1)->op == Ocall) { + v->t[0] &= ~T.retregs((i-1)->arg[1], 0); + limit2(v, T.nrsave[0], T.nrsave[1], 0); + for (n=0, r=0; T.rsave[n]>=0; n++) + r |= 
BIT(T.rsave[n]); + v->t[0] |= T.argregs((i-1)->arg[1], 0); + } else { + limit2(v, 0, 0, 0); + r = v->t[0]; + } + sethint(v, r); + reloads(u, v); + do + emiti(*--i1); + while (i1 != i); + return i; +} + +static void +merge(BSet *u, Blk *bu, BSet *v, Blk *bv) +{ + int t; + + if (bu->loop <= bv->loop) + bsunion(u, v); + else + for (t=0; bsiter(v, &t); t++) + if (tmp[t].slot == -1) + bsset(u, t); +} + +/* spill code insertion + * requires spill costs, rpo, liveness + * + * Note: this will replace liveness + * information (in, out) with temporaries + * that must be in registers at block + * borders + * + * Be careful with: + * - Ocopy instructions to ensure register + * constraints + */ +void +spill(Fn *fn) +{ + Blk *b, *s1, *s2, *hd, **bp; + int j, l, t, k, lvarg[2]; + uint n; + BSet u[1], v[1], w[1]; + Ins *i; + Phi *p; + Mem *m; + bits r; + + tmp = fn->tmp; + ntmp = fn->ntmp; + bsinit(u, ntmp); + bsinit(v, ntmp); + bsinit(w, ntmp); + bsinit(mask[0], ntmp); + bsinit(mask[1], ntmp); + locs = fn->slot; + slot4 = 0; + slot8 = 0; + for (t=0; t= T.fpr0 && t < T.fpr0 + T.nfpr) + k = 1; + if (t >= Tmp0) + k = KBASE(tmp[t].cls); + bsset(mask[k], t); + } + + for (bp=&fn->rpo[fn->nblk]; bp!=fn->rpo;) { + b = *--bp; + /* invariant: all blocks with bigger rpo got + * their in,out updated. */ + + /* 1. find temporaries in registers at + * the end of the block (put them in v) */ + curi = 0; + s1 = b->s1; + s2 = b->s2; + hd = 0; + if (s1 && s1->id <= b->id) + hd = s1; + if (s2 && s2->id <= b->id) + if (!hd || s2->id >= hd->id) + hd = s2; + if (hd) { + /* back-edge */ + bszero(v); + hd->gen->t[0] |= T.rglob; /* don't spill registers */ + for (k=0; k<2; k++) { + n = k == 0 ? 
T.ngpr : T.nfpr; + bscopy(u, b->out); + bsinter(u, mask[k]); + bscopy(w, u); + bsinter(u, hd->gen); + bsdiff(w, hd->gen); + if (bscount(u) < n) { + j = bscount(w); /* live through */ + l = hd->nlive[k]; + limit(w, n - (l - j), 0); + bsunion(u, w); + } else + limit(u, n, 0); + bsunion(v, u); + } + } else if (s1) { + /* avoid reloading temporaries + * in the middle of loops */ + bszero(v); + liveon(w, b, s1); + merge(v, b, w, s1); + if (s2) { + liveon(u, b, s2); + merge(v, b, u, s2); + bsinter(w, u); + } + limit2(v, 0, 0, w); + } else { + bscopy(v, b->out); + if (rtype(b->jmp.arg) == RCall) + v->t[0] |= T.retregs(b->jmp.arg, 0); + } + if (rtype(b->jmp.arg) == RTmp) { + t = b->jmp.arg.val; + assert(KBASE(tmp[t].cls) == 0); + bsset(v, t); + limit2(v, 0, 0, NULL); + if (!bshas(v, t)) + b->jmp.arg = slot(t); + } + for (t=Tmp0; bsiter(b->out, &t); t++) + if (!bshas(v, t)) + slot(t); + bscopy(b->out, v); + + /* 2. process the block instructions */ + curi = &insb[NIns]; + for (i=&b->ins[b->nins]; i!=b->ins;) { + i--; + if (regcpy(i)) { + i = dopm(b, i, v); + continue; + } + bszero(w); + if (!req(i->to, R)) { + assert(rtype(i->to) == RTmp); + t = i->to.val; + if (bshas(v, t)) + bsclr(v, t); + else { + /* make sure we have a reg + * for the result */ + assert(t >= Tmp0 && "dead reg"); + bsset(v, t); + bsset(w, t); + } + } + j = T.memargs(i->op); + for (n=0; n<2; n++) + if (rtype(i->arg[n]) == RMem) + j--; + for (n=0; n<2; n++) + switch (rtype(i->arg[n])) { + case RMem: + t = i->arg[n].val; + m = &fn->mem[t]; + if (rtype(m->base) == RTmp) { + bsset(v, m->base.val); + bsset(w, m->base.val); + } + if (rtype(m->index) == RTmp) { + bsset(v, m->index.val); + bsset(w, m->index.val); + } + break; + case RTmp: + t = i->arg[n].val; + lvarg[n] = bshas(v, t); + bsset(v, t); + if (j-- <= 0) + bsset(w, t); + break; + } + bscopy(u, v); + limit2(v, 0, 0, w); + for (n=0; n<2; n++) + if (rtype(i->arg[n]) == RTmp) { + t = i->arg[n].val; + if (!bshas(v, t)) { + /* do not reload if the + * 
argument is dead + */ + if (!lvarg[n]) + bsclr(u, t); + i->arg[n] = slot(t); + } + } + reloads(u, v); + if (!req(i->to, R)) { + t = i->to.val; + store(i->to, tmp[t].slot); + if (t >= Tmp0) + /* in case i->to was a + * dead temporary */ + bsclr(v, t); + } + emiti(*i); + r = v->t[0]; /* Tmp0 is NBit */ + if (r) + sethint(v, r); + } + if (b == fn->start) + assert(v->t[0] == (T.rglob | fn->reg)); + else + assert(v->t[0] == T.rglob); + + for (p=b->phi; p; p=p->link) { + assert(rtype(p->to) == RTmp); + t = p->to.val; + if (bshas(v, t)) { + bsclr(v, t); + store(p->to, tmp[t].slot); + } else if (bshas(b->in, t)) + /* only if the phi is live */ + p->to = slot(p->to.val); + } + bscopy(b->in, v); + idup(b, curi, &insb[NIns]-curi); + } + + /* align the locals to a 16 byte boundary */ + /* specific to NAlign == 3 */ + slot8 += slot8 & 3; + fn->slot += slot8; + + if (debug['S']) { + fprintf(stderr, "\n> Block information:\n"); + for (b=fn->start; b; b=b->link) { + fprintf(stderr, "\t%-10s (% 5d) ", b->name, b->loop); + dumpts(b->out, fn->tmp, stderr); + } + fprintf(stderr, "\n> After spilling:\n"); + printfn(fn, stderr); + } +} diff --git a/src/qbe/ssa.c b/src/qbe/ssa.c new file mode 100644 index 00000000..89cde447 --- /dev/null +++ b/src/qbe/ssa.c @@ -0,0 +1,433 @@ +#include "all.h" +#include + +void +adduse(Tmp *tmp, int ty, Blk *b, ...) 
+{ + Use *u; + int n; + va_list ap; + + if (!tmp->use) + return; + va_start(ap, b); + n = tmp->nuse; + vgrow(&tmp->use, ++tmp->nuse); + u = &tmp->use[n]; + u->type = ty; + u->bid = b->id; + switch (ty) { + case UPhi: + u->u.phi = va_arg(ap, Phi *); + break; + case UIns: + u->u.ins = va_arg(ap, Ins *); + break; + case UJmp: + break; + default: + die("unreachable"); + } + va_end(ap); +} + +/* fill usage, width, phi, and class information + * must not change .visit fields + */ +void +filluse(Fn *fn) +{ + Blk *b; + Phi *p; + Ins *i; + int m, t, tp, w, x; + uint a; + Tmp *tmp; + + tmp = fn->tmp; + for (t=Tmp0; tntmp; t++) { + tmp[t].def = 0; + tmp[t].bid = -1u; + tmp[t].ndef = 0; + tmp[t].nuse = 0; + tmp[t].cls = 0; + tmp[t].phi = 0; + tmp[t].width = WFull; + if (tmp[t].use == 0) + tmp[t].use = vnew(0, sizeof(Use), PFn); + } + for (b=fn->start; b; b=b->link) { + for (p=b->phi; p; p=p->link) { + assert(rtype(p->to) == RTmp); + tp = p->to.val; + tmp[tp].bid = b->id; + tmp[tp].ndef++; + tmp[tp].cls = p->cls; + tp = phicls(tp, fn->tmp); + for (a=0; anarg; a++) + if (rtype(p->arg[a]) == RTmp) { + t = p->arg[a].val; + adduse(&tmp[t], UPhi, b, p); + t = phicls(t, fn->tmp); + if (t != tp) + tmp[t].phi = tp; + } + } + for (i=b->ins; i<&b->ins[b->nins]; i++) { + if (!req(i->to, R)) { + assert(rtype(i->to) == RTmp); + w = WFull; + if (isparbh(i->op)) + w = Wsb + (i->op - Oparsb); + if (isload(i->op) && i->op != Oload) + w = Wsb + (i->op - Oloadsb); + if (isext(i->op)) + w = Wsb + (i->op - Oextsb); + if (iscmp(i->op, &x, &x)) + w = Wub; + if (w == Wsw || w == Wuw) + if (i->cls == Kw) + w = WFull; + t = i->to.val; + tmp[t].width = w; + tmp[t].def = i; + tmp[t].bid = b->id; + tmp[t].ndef++; + tmp[t].cls = i->cls; + } + for (m=0; m<2; m++) + if (rtype(i->arg[m]) == RTmp) { + t = i->arg[m].val; + adduse(&tmp[t], UIns, b, i); + } + } + if (rtype(b->jmp.arg) == RTmp) + adduse(&tmp[b->jmp.arg.val], UJmp, b); + } +} + +static Ref +refindex(int t, Fn *fn) +{ + return newtmp(fn->tmp[t].name, 
fn->tmp[t].cls, fn); +} + +static void +phiins(Fn *fn) +{ + BSet u[1], defs[1]; + Blk *a, *b, **blist, **be, **bp; + Ins *i; + Phi *p; + Use *use; + Ref r; + int t, nt, ok; + uint n, defb; + short k; + + bsinit(u, fn->nblk); + bsinit(defs, fn->nblk); + blist = emalloc(fn->nblk * sizeof blist[0]); + be = &blist[fn->nblk]; + nt = fn->ntmp; + for (t=Tmp0; ttmp[t].visit = 0; + if (fn->tmp[t].phi != 0) + continue; + if (fn->tmp[t].ndef == 1) { + ok = 1; + defb = fn->tmp[t].bid; + use = fn->tmp[t].use; + for (n=fn->tmp[t].nuse; n--; use++) + ok &= use->bid == defb; + if (ok || defb == fn->start->id) + continue; + } + bszero(u); + k = Kx; + bp = be; + for (b=fn->start; b; b=b->link) { + b->visit = 0; + r = R; + for (i=b->ins; i<&b->ins[b->nins]; i++) { + if (!req(r, R)) { + if (req(i->arg[0], TMP(t))) + i->arg[0] = r; + if (req(i->arg[1], TMP(t))) + i->arg[1] = r; + } + if (req(i->to, TMP(t))) { + if (!bshas(b->out, t)) { + r = refindex(t, fn); + i->to = r; + } else { + if (!bshas(u, b->id)) { + bsset(u, b->id); + *--bp = b; + } + if (clsmerge(&k, i->cls)) + die("invalid input"); + } + } + } + if (!req(r, R) && req(b->jmp.arg, TMP(t))) + b->jmp.arg = r; + } + bscopy(defs, u); + while (bp != be) { + fn->tmp[t].visit = t; + b = *bp++; + bsclr(u, b->id); + for (n=0; nnfron; n++) { + a = b->fron[n]; + if (a->visit++ == 0) + if (bshas(a->in, t)) { + p = alloc(sizeof *p); + p->cls = k; + p->to = TMP(t); + p->link = a->phi; + p->arg = vnew(0, sizeof p->arg[0], PFn); + p->blk = vnew(0, sizeof p->blk[0], PFn); + a->phi = p; + if (!bshas(defs, a->id)) + if (!bshas(u, a->id)) { + bsset(u, a->id); + *--bp = a; + } + } + } + } + } + free(blist); +} + +typedef struct Name Name; +struct Name { + Ref r; + Blk *b; + Name *up; +}; + +static Name *namel; + +static Name * +nnew(Ref r, Blk *b, Name *up) +{ + Name *n; + + if (namel) { + n = namel; + namel = n->up; + } else + /* could use alloc, here + * but namel should be reset + */ + n = emalloc(sizeof *n); + n->r = r; + n->b = b; + n->up = 
up; + return n; +} + +static void +nfree(Name *n) +{ + n->up = namel; + namel = n; +} + +static void +rendef(Ref *r, Blk *b, Name **stk, Fn *fn) +{ + Ref r1; + int t; + + t = r->val; + if (req(*r, R) || !fn->tmp[t].visit) + return; + r1 = refindex(t, fn); + fn->tmp[r1.val].visit = t; + stk[t] = nnew(r1, b, stk[t]); + *r = r1; +} + +static Ref +getstk(int t, Blk *b, Name **stk) +{ + Name *n, *n1; + + n = stk[t]; + while (n && !dom(n->b, b)) { + n1 = n; + n = n->up; + nfree(n1); + } + stk[t] = n; + if (!n) { + /* uh, oh, warn */ + return UNDEF; + } else + return n->r; +} + +static void +renblk(Blk *b, Name **stk, Fn *fn) +{ + Phi *p; + Ins *i; + Blk *s, **ps, *succ[3]; + int t, m; + + for (p=b->phi; p; p=p->link) + rendef(&p->to, b, stk, fn); + for (i=b->ins; i<&b->ins[b->nins]; i++) { + for (m=0; m<2; m++) { + t = i->arg[m].val; + if (rtype(i->arg[m]) == RTmp) + if (fn->tmp[t].visit) + i->arg[m] = getstk(t, b, stk); + } + rendef(&i->to, b, stk, fn); + } + t = b->jmp.arg.val; + if (rtype(b->jmp.arg) == RTmp) + if (fn->tmp[t].visit) + b->jmp.arg = getstk(t, b, stk); + succ[0] = b->s1; + succ[1] = b->s2 == b->s1 ? 
0 : b->s2; + succ[2] = 0; + for (ps=succ; (s=*ps); ps++) + for (p=s->phi; p; p=p->link) { + t = p->to.val; + if ((t=fn->tmp[t].visit)) { + m = p->narg++; + vgrow(&p->arg, p->narg); + vgrow(&p->blk, p->narg); + p->arg[m] = getstk(t, b, stk); + p->blk[m] = b; + } + } + for (s=b->dom; s; s=s->dlink) + renblk(s, stk, fn); +} + +/* require rpo and use */ +void +ssa(Fn *fn) +{ + Name **stk, *n; + int d, nt; + Blk *b, *b1; + + nt = fn->ntmp; + stk = emalloc(nt * sizeof stk[0]); + d = debug['L']; + debug['L'] = 0; + filldom(fn); + if (debug['N']) { + fprintf(stderr, "\n> Dominators:\n"); + for (b1=fn->start; b1; b1=b1->link) { + if (!b1->dom) + continue; + fprintf(stderr, "%10s:", b1->name); + for (b=b1->dom; b; b=b->dlink) + fprintf(stderr, " %s", b->name); + fprintf(stderr, "\n"); + } + } + fillfron(fn); + filllive(fn); + phiins(fn); + renblk(fn->start, stk, fn); + while (nt--) + while ((n=stk[nt])) { + stk[nt] = n->up; + nfree(n); + } + debug['L'] = d; + free(stk); + if (debug['N']) { + fprintf(stderr, "\n> After SSA construction:\n"); + printfn(fn, stderr); + } +} + +static int +phicheck(Phi *p, Blk *b, Ref t) +{ + Blk *b1; + uint n; + + for (n=0; nnarg; n++) + if (req(p->arg[n], t)) { + b1 = p->blk[n]; + if (b1 != b && !sdom(b, b1)) + return 1; + } + return 0; +} + +/* require use and ssa */ +void +ssacheck(Fn *fn) +{ + Tmp *t; + Ins *i; + Phi *p; + Use *u; + Blk *b, *bu; + Ref r; + + for (t=&fn->tmp[Tmp0]; t-fn->tmp < fn->ntmp; t++) { + if (t->ndef > 1) + err("ssa temporary %%%s defined more than once", + t->name); + if (t->nuse > 0 && t->ndef == 0) { + bu = fn->rpo[t->use[0].bid]; + goto Err; + } + } + for (b=fn->start; b; b=b->link) { + for (p=b->phi; p; p=p->link) { + r = p->to; + t = &fn->tmp[r.val]; + for (u=t->use; u<&t->use[t->nuse]; u++) { + bu = fn->rpo[u->bid]; + if (u->type == UPhi) { + if (phicheck(u->u.phi, b, r)) + goto Err; + } else + if (bu != b && !sdom(b, bu)) + goto Err; + } + } + for (i=b->ins; i<&b->ins[b->nins]; i++) { + if (rtype(i->to) != 
RTmp) + continue; + r = i->to; + t = &fn->tmp[r.val]; + for (u=t->use; u<&t->use[t->nuse]; u++) { + bu = fn->rpo[u->bid]; + if (u->type == UPhi) { + if (phicheck(u->u.phi, b, r)) + goto Err; + } else { + if (bu == b) { + if (u->type == UIns) + if (u->u.ins <= i) + goto Err; + } else + if (!sdom(b, bu)) + goto Err; + } + } + } + } + return; +Err: + if (t->visit) + die("%%%s violates ssa invariant", t->name); + else + err("ssa temporary %%%s is used undefined in @%s", + t->name, bu->name); +} diff --git a/src/qbe/test/_alt.ssa b/src/qbe/test/_alt.ssa new file mode 100644 index 00000000..3f89e5e2 --- /dev/null +++ b/src/qbe/test/_alt.ssa @@ -0,0 +1,25 @@ +# an example with reducible control +# flow graph that exposes poor +# handling of looping constructs + +function $test() { +@start + %ten =w copy 10 + %dum =w copy 0 # dummy live-through temporary +@loop + %alt =w phi @start 0, @left %alt1, @right %alt1 + %cnt =w phi @start 100, @left %cnt, @right %cnt1 + %alt1 =w sub 1, %alt + jnz %alt1, @right, @left +@left + %x =w phi @loop 10, @left %x1 + %x1 =w sub %x, 1 + %z =w copy %x + jnz %z, @left, @loop +@right + %cnt1 =w sub %cnt, %ten + jnz %cnt1, @loop, @end +@end + %ret =w add %cnt, %dum + ret +} diff --git a/src/qbe/test/_bf99.ssa b/src/qbe/test/_bf99.ssa new file mode 100644 index 00000000..a1dd85cb --- /dev/null +++ b/src/qbe/test/_bf99.ssa @@ -0,0 +1,2687 @@ +export +function w $main() { + @start + %ptr =l alloc16 4096 + %r =l call $memset(l %ptr, l 0, l 4096) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, 7 + storel %v, %ptr + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, 10 + storel %v, %ptr + @.L0 + %v =l loadl %ptr + jnz %v, @.L1, @.L2 + @.L1 + %ptr =l add %ptr, -8 + %v =l loadl %ptr + %v =l add %v, 5 + storel %v, %ptr + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L0 + @.L2 + %v =l loadl %ptr + %v =l add %v, 7 + storel %v, %ptr + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, 10 + storel %v, 
%ptr + @.L3 + %v =l loadl %ptr + jnz %v, @.L4, @.L5 + @.L4 + %ptr =l add %ptr, -8 + %v =l loadl %ptr + %v =l add %v, 5 + storel %v, %ptr + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L3 + @.L5 + %v =l loadl %ptr + %v =l add %v, 10 + storel %v, %ptr + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, 9 + storel %v, %ptr + %ptr =l add %ptr, 16 + %v =l loadl %ptr + %v =l add %v, 8 + storel %v, %ptr + @.L6 + %v =l loadl %ptr + jnz %v, @.L7, @.L8 + @.L7 + %ptr =l add %ptr, -8 + %v =l loadl %ptr + %v =l add %v, 4 + storel %v, %ptr + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L6 + @.L8 + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, 14 + storel %v, %ptr + @.L9 + %v =l loadl %ptr + jnz %v, @.L10, @.L11 + @.L10 + %ptr =l add %ptr, -8 + %v =l loadl %ptr + %v =l add %v, 7 + storel %v, %ptr + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L9 + @.L11 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, 11 + storel %v, %ptr + @.L12 + %v =l loadl %ptr + jnz %v, @.L13, @.L14 + @.L13 + %ptr =l add %ptr, -8 + %v =l loadl %ptr + %v =l add %v, 10 + storel %v, %ptr + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L12 + @.L14 + %v =l loadl %ptr + %v =l add %v, 2 + storel %v, %ptr + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, 19 + storel %v, %ptr + @.L15 + %v =l loadl %ptr + jnz %v, @.L16, @.L17 + @.L16 + %ptr =l add %ptr, -8 + %v =l loadl %ptr + %v =l add %v, 6 + storel %v, %ptr + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L15 + @.L17 + %v =l loadl %ptr + %v =l add %v, 2 + storel %v, %ptr + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, 19 + storel %v, %ptr + @.L18 + %v =l loadl %ptr + jnz %v, @.L19, @.L20 + @.L19 + %ptr =l add %ptr, -8 + %v =l loadl %ptr + %v =l add %v, 6 + storel %v, %ptr + %ptr =l add 
%ptr, 8 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L18 + @.L20 + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, 12 + storel %v, %ptr + @.L21 + %v =l loadl %ptr + jnz %v, @.L22, @.L23 + @.L22 + %ptr =l add %ptr, -8 + %v =l loadl %ptr + %v =l add %v, 9 + storel %v, %ptr + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L21 + @.L23 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, 10 + storel %v, %ptr + @.L24 + %v =l loadl %ptr + jnz %v, @.L25, @.L26 + @.L25 + %ptr =l add %ptr, -8 + %v =l loadl %ptr + %v =l add %v, 10 + storel %v, %ptr + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L24 + @.L26 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, 19 + storel %v, %ptr + @.L27 + %v =l loadl %ptr + jnz %v, @.L28, @.L29 + @.L28 + %ptr =l add %ptr, -8 + %v =l loadl %ptr + %v =l add %v, 6 + storel %v, %ptr + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L27 + @.L29 + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, 8 + storel %v, %ptr + @.L30 + %v =l loadl %ptr + jnz %v, @.L31, @.L32 + @.L31 + %ptr =l add %ptr, -8 + %v =l loadl %ptr + %v =l add %v, 4 + storel %v, %ptr + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L30 + @.L32 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, 11 + storel %v, %ptr + @.L33 + %v =l loadl %ptr + jnz %v, @.L34, @.L35 + @.L34 + %ptr =l add %ptr, -8 + %v =l loadl %ptr + %v =l add %v, 10 + storel %v, %ptr + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L33 + @.L35 + %v =l loadl %ptr + %v =l add %v, 2 + storel %v, %ptr + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, 10 + storel %v, %ptr + @.L36 + %v =l loadl %ptr + jnz %v, @.L37, @.L38 + 
@.L37 + %ptr =l add %ptr, -8 + %v =l loadl %ptr + %v =l add %v, 10 + storel %v, %ptr + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L36 + @.L38 + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, 8 + storel %v, %ptr + @.L39 + %v =l loadl %ptr + jnz %v, @.L40, @.L41 + @.L40 + %ptr =l add %ptr, -8 + %v =l loadl %ptr + %v =l add %v, 4 + storel %v, %ptr + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L39 + @.L41 + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, 14 + storel %v, %ptr + @.L42 + %v =l loadl %ptr + jnz %v, @.L43, @.L44 + @.L43 + %ptr =l add %ptr, -8 + %v =l loadl %ptr + %v =l add %v, 7 + storel %v, %ptr + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L42 + @.L44 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, 10 + storel %v, %ptr + @.L45 + %v =l loadl %ptr + jnz %v, @.L46, @.L47 + @.L46 + %ptr =l add %ptr, -8 + %v =l loadl %ptr + %v =l add %v, 10 + storel %v, %ptr + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L45 + @.L47 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, 10 + storel %v, %ptr + @.L48 + %v =l loadl %ptr + jnz %v, @.L49, @.L50 + @.L49 + %ptr =l add %ptr, -8 + %v =l loadl %ptr + %v =l add %v, 10 + storel %v, %ptr + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L48 + @.L50 + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, 19 + storel %v, %ptr + @.L51 + %v =l loadl %ptr + jnz %v, @.L52, @.L53 + @.L52 + %ptr =l add %ptr, -8 + %v =l loadl %ptr + %v =l add %v, 6 + storel %v, %ptr + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L51 + @.L53 + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, 8 + storel %v, %ptr + @.L54 + %v =l loadl %ptr + jnz %v, @.L55, @.L56 + @.L55 + %ptr =l 
add %ptr, -8 + %v =l loadl %ptr + %v =l add %v, 4 + storel %v, %ptr + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L54 + @.L56 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, 11 + storel %v, %ptr + @.L57 + %v =l loadl %ptr + jnz %v, @.L58, @.L59 + @.L58 + %ptr =l add %ptr, -8 + %v =l loadl %ptr + %v =l add %v, 10 + storel %v, %ptr + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L57 + @.L59 + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, 11 + storel %v, %ptr + @.L60 + %v =l loadl %ptr + jnz %v, @.L61, @.L62 + @.L61 + %ptr =l add %ptr, -8 + %v =l loadl %ptr + %v =l add %v, 10 + storel %v, %ptr + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L60 + @.L62 + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, 8 + storel %v, %ptr + @.L63 + %v =l loadl %ptr + jnz %v, @.L64, @.L65 + @.L64 + %ptr =l add %ptr, -8 + %v =l loadl %ptr + %v =l add %v, 4 + storel %v, %ptr + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L63 + @.L65 + %v =l loadl %ptr + %v =l add %v, 2 + storel %v, %ptr + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, 19 + storel %v, %ptr + @.L66 + %v =l loadl %ptr + jnz %v, @.L67, @.L68 + @.L67 + %ptr =l add %ptr, -8 + %v =l loadl %ptr + %v =l add %v, 6 + storel %v, %ptr + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L66 + @.L68 + %v =l loadl %ptr + %v =l add %v, 4 + storel %v, %ptr + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, 10 + storel %v, %ptr + @.L69 + %v =l loadl %ptr + jnz %v, @.L70, @.L71 + @.L70 + %ptr =l add %ptr, -8 + %v =l loadl %ptr + %v =l add %v, 10 + storel %v, %ptr + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L69 + @.L71 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l 
add %v, 10 + storel %v, %ptr + @.L72 + %v =l loadl %ptr + jnz %v, @.L73, @.L74 + @.L73 + %ptr =l add %ptr, -8 + %v =l loadl %ptr + %v =l add %v, 10 + storel %v, %ptr + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L72 + @.L74 + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, 8 + storel %v, %ptr + @.L75 + %v =l loadl %ptr + jnz %v, @.L76, @.L77 + @.L76 + %ptr =l add %ptr, -8 + %v =l loadl %ptr + %v =l add %v, 4 + storel %v, %ptr + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L75 + @.L77 + %v =l loadl %ptr + %v =l add %v, 2 + storel %v, %ptr + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, 13 + storel %v, %ptr + @.L78 + %v =l loadl %ptr + jnz %v, @.L79, @.L80 + @.L79 + %ptr =l add %ptr, -8 + %v =l loadl %ptr + %v =l add %v, 9 + storel %v, %ptr + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L78 + @.L80 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, 12 + storel %v, %ptr + @.L81 + %v =l loadl %ptr + jnz %v, @.L82, @.L83 + @.L82 + %ptr =l add %ptr, -8 + %v =l loadl %ptr + %v =l add %v, 8 + storel %v, %ptr + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L81 + @.L83 + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, 12 + storel %v, %ptr + @.L84 + %v =l loadl %ptr + jnz %v, @.L85, @.L86 + @.L85 + %ptr =l add %ptr, -8 + %v =l loadl %ptr + %v =l add %v, 9 + storel %v, %ptr + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L84 + @.L86 + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, 12 + storel %v, %ptr + @.L87 + %v =l loadl %ptr + jnz %v, @.L88, @.L89 + @.L88 + %ptr =l add %ptr, -8 + %v =l loadl %ptr + %v =l add %v, 9 + storel %v, %ptr + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L87 + @.L89 + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, 5 + 
storel %v, %ptr + @.L90 + %v =l loadl %ptr + jnz %v, @.L91, @.L92 + @.L91 + %ptr =l add %ptr, -8 + %v =l loadl %ptr + %v =l add %v, 2 + storel %v, %ptr + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L90 + @.L92 + %v =l loadl %ptr + %v =l add %v, 2 + storel %v, %ptr + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, 19 + storel %v, %ptr + @.L93 + %v =l loadl %ptr + jnz %v, @.L94, @.L95 + @.L94 + %ptr =l add %ptr, -8 + %v =l loadl %ptr + %v =l add %v, 6 + storel %v, %ptr + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L93 + @.L95 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, 12 + storel %v, %ptr + @.L96 + %v =l loadl %ptr + jnz %v, @.L97, @.L98 + @.L97 + %ptr =l add %ptr, -8 + %v =l loadl %ptr + %v =l add %v, 8 + storel %v, %ptr + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L96 + @.L98 + %v =l loadl %ptr + %v =l add %v, 3 + storel %v, %ptr + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, 13 + storel %v, %ptr + @.L99 + %v =l loadl %ptr + jnz %v, @.L100, @.L101 + @.L100 + %ptr =l add %ptr, -8 + %v =l loadl %ptr + %v =l add %v, 8 + storel %v, %ptr + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L99 + @.L101 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, 10 + storel %v, %ptr + @.L102 + %v =l loadl %ptr + jnz %v, @.L103, @.L104 + @.L103 + %ptr =l add %ptr, -8 + %v =l loadl %ptr + %v =l add %v, 10 + storel %v, %ptr + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L102 + @.L104 + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, 8 + storel %v, %ptr + @.L105 + %v =l loadl %ptr + jnz %v, @.L106, @.L107 + @.L106 + %ptr =l add %ptr, -8 + %v =l loadl %ptr + %v =l add %v, 4 + storel %v, %ptr + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add 
%v, -1 + storel %v, %ptr + jmp @.L105 + @.L107 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, 11 + storel %v, %ptr + @.L108 + %v =l loadl %ptr + jnz %v, @.L109, @.L110 + @.L109 + %ptr =l add %ptr, -8 + %v =l loadl %ptr + %v =l add %v, 10 + storel %v, %ptr + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L108 + @.L110 + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, 11 + storel %v, %ptr + @.L111 + %v =l loadl %ptr + jnz %v, @.L112, @.L113 + @.L112 + %ptr =l add %ptr, -8 + %v =l loadl %ptr + %v =l add %v, 10 + storel %v, %ptr + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L111 + @.L113 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, 10 + storel %v, %ptr + @.L114 + %v =l loadl %ptr + jnz %v, @.L115, @.L116 + @.L115 + %ptr =l add %ptr, -8 + %v =l loadl %ptr + %v =l add %v, 10 + storel %v, %ptr + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L114 + @.L116 + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, 8 + storel %v, %ptr + @.L117 + %v =l loadl %ptr + jnz %v, @.L118, @.L119 + @.L118 + %ptr =l add %ptr, -8 + %v =l loadl %ptr + %v =l add %v, 4 + storel %v, %ptr + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L117 + @.L119 + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, 10 + storel %v, %ptr + @.L120 + %v =l loadl %ptr + jnz %v, @.L121, @.L122 + @.L121 + %ptr =l add %ptr, -8 + %v =l loadl %ptr + %v =l add %v, 10 + storel %v, %ptr + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L120 + @.L122 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, 11 + storel %v, %ptr + @.L123 + %v =l loadl %ptr + jnz %v, @.L124, @.L125 + @.L124 + %ptr =l add %ptr, -8 + %v =l loadl %ptr + %v =l add 
%v, 10 + storel %v, %ptr + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L123 + @.L125 + %v =l loadl %ptr + %v =l add %v, 2 + storel %v, %ptr + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, 13 + storel %v, %ptr + @.L126 + %v =l loadl %ptr + jnz %v, @.L127, @.L128 + @.L127 + %ptr =l add %ptr, -8 + %v =l loadl %ptr + %v =l add %v, 9 + storel %v, %ptr + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L126 + @.L128 + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, 11 + storel %v, %ptr + @.L129 + %v =l loadl %ptr + jnz %v, @.L130, @.L131 + @.L130 + %ptr =l add %ptr, -8 + %v =l loadl %ptr + %v =l add %v, 10 + storel %v, %ptr + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L129 + @.L131 + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, 8 + storel %v, %ptr + @.L132 + %v =l loadl %ptr + jnz %v, @.L133, @.L134 + @.L133 + %ptr =l add %ptr, -8 + %v =l loadl %ptr + %v =l add %v, 4 + storel %v, %ptr + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L132 + @.L134 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, 12 + storel %v, %ptr + @.L135 + %v =l loadl %ptr + jnz %v, @.L136, @.L137 + @.L136 + %ptr =l add %ptr, -8 + %v =l loadl %ptr + %v =l add %v, 8 + storel %v, %ptr + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L135 + @.L137 + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, 11 + storel %v, %ptr + @.L138 + %v =l loadl %ptr + jnz %v, @.L139, @.L140 + @.L139 + %ptr =l add %ptr, -8 + %v =l loadl %ptr + %v =l add %v, 10 + storel %v, %ptr + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L138 + @.L140 + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, 10 + storel %v, %ptr + @.L141 + %v =l loadl %ptr + jnz %v, @.L142, @.L143 + @.L142 + %ptr =l add %ptr, -8 + %v =l 
loadl %ptr + %v =l add %v, 10 + storel %v, %ptr + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L141 + @.L143 + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, 8 + storel %v, %ptr + @.L144 + %v =l loadl %ptr + jnz %v, @.L145, @.L146 + @.L145 + %ptr =l add %ptr, -8 + %v =l loadl %ptr + %v =l add %v, 4 + storel %v, %ptr + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L144 + @.L146 + %v =l loadl %ptr + %v =l add %v, 2 + storel %v, %ptr + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, 11 + storel %v, %ptr + @.L147 + %v =l loadl %ptr + jnz %v, @.L148, @.L149 + @.L148 + %ptr =l add %ptr, -8 + %v =l loadl %ptr + %v =l add %v, 10 + storel %v, %ptr + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L147 + @.L149 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, 12 + storel %v, %ptr + @.L150 + %v =l loadl %ptr + jnz %v, @.L151, @.L152 + @.L151 + %ptr =l add %ptr, -8 + %v =l loadl %ptr + %v =l add %v, 8 + storel %v, %ptr + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L150 + @.L152 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, 19 + storel %v, %ptr + @.L153 + %v =l loadl %ptr + jnz %v, @.L154, @.L155 + @.L154 + %ptr =l add %ptr, -8 + %v =l loadl %ptr + %v =l add %v, 6 + storel %v, %ptr + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L153 + @.L155 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, 19 + storel %v, %ptr + @.L156 + %v =l loadl %ptr + jnz %v, @.L157, @.L158 + @.L157 + %ptr =l add %ptr, -8 + %v =l loadl %ptr + %v =l add %v, 6 + storel %v, %ptr + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L156 + @.L158 + %ptr =l add %ptr, 8 + %v =l loadl %ptr 
+ %v =l add %v, 8 + storel %v, %ptr + @.L159 + %v =l loadl %ptr + jnz %v, @.L160, @.L161 + @.L160 + %ptr =l add %ptr, -8 + %v =l loadl %ptr + %v =l add %v, 4 + storel %v, %ptr + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L159 + @.L161 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, 13 + storel %v, %ptr + @.L162 + %v =l loadl %ptr + jnz %v, @.L163, @.L164 + @.L163 + %ptr =l add %ptr, -8 + %v =l loadl %ptr + %v =l add %v, 8 + storel %v, %ptr + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L162 + @.L164 + %v =l loadl %ptr + %v =l add %v, 2 + storel %v, %ptr + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, 19 + storel %v, %ptr + @.L165 + %v =l loadl %ptr + jnz %v, @.L166, @.L167 + @.L166 + %ptr =l add %ptr, -8 + %v =l loadl %ptr + %v =l add %v, 6 + storel %v, %ptr + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L165 + @.L167 + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, 8 + storel %v, %ptr + @.L168 + %v =l loadl %ptr + jnz %v, @.L169, @.L170 + @.L169 + %ptr =l add %ptr, -8 + %v =l loadl %ptr + %v =l add %v, 4 + storel %v, %ptr + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L168 + @.L170 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, 12 + storel %v, %ptr + @.L171 + %v =l loadl %ptr + jnz %v, @.L172, @.L173 + @.L172 + %ptr =l add %ptr, -8 + %v =l loadl %ptr + %v =l add %v, 8 + storel %v, %ptr + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L171 + @.L173 + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, 19 + storel %v, %ptr + @.L174 + %v =l loadl %ptr + jnz %v, @.L175, @.L176 + @.L175 + %ptr =l add %ptr, -8 + %v =l loadl %ptr + %v =l add %v, 6 + storel %v, %ptr + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, -1 + storel 
%v, %ptr + jmp @.L174 + @.L176 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, 11 + storel %v, %ptr + @.L177 + %v =l loadl %ptr + jnz %v, @.L178, @.L179 + @.L178 + %ptr =l add %ptr, -8 + %v =l loadl %ptr + %v =l add %v, 10 + storel %v, %ptr + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L177 + @.L179 + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, 13 + storel %v, %ptr + @.L180 + %v =l loadl %ptr + jnz %v, @.L181, @.L182 + @.L181 + %ptr =l add %ptr, -8 + %v =l loadl %ptr + %v =l add %v, 9 + storel %v, %ptr + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L180 + @.L182 + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, 11 + storel %v, %ptr + @.L183 + %v =l loadl %ptr + jnz %v, @.L184, @.L185 + @.L184 + %ptr =l add %ptr, -8 + %v =l loadl %ptr + %v =l add %v, 10 + storel %v, %ptr + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L183 + @.L185 + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, 10 + storel %v, %ptr + @.L186 + %v =l loadl %ptr + jnz %v, @.L187, @.L188 + @.L187 + %ptr =l add %ptr, -8 + %v =l loadl %ptr + %v =l add %v, 10 + storel %v, %ptr + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L186 + @.L188 + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, 5 + storel %v, %ptr + @.L189 + %v =l loadl %ptr + jnz %v, @.L190, @.L191 + @.L190 + %ptr =l add %ptr, -8 + %v =l loadl %ptr + %v =l add %v, 2 + storel %v, %ptr + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L189 + @.L191 + %v =l loadl %ptr + %v =l add %v, 13 + storel %v, %ptr + @.L192 + %v =l loadl %ptr + jnz %v, @.L193, @.L194 + @.L193 + %ptr =l add %ptr, -8 + jmp @.L192 + @.L194 + %ptr =l add %ptr, 32 + @.L195 + %v =l loadl %ptr + jnz %v, @.L196, @.L197 + @.L196 + %ptr =l add %ptr, -8 + @.L198 + %v =l loadl %ptr + jnz %v, @.L199, 
@.L200 + @.L199 + @.L201 + %v =l loadl %ptr + jnz %v, @.L202, @.L203 + @.L202 + %ptr =l add %ptr, 8 + jmp @.L201 + @.L203 + %ptr =l add %ptr, -16 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %v =l loadl %ptr + %r =l call $putchar(l %v) + @.L204 + %v =l loadl %ptr + jnz %v, @.L205, @.L206 + @.L205 + %ptr =l add %ptr, -8 + jmp @.L204 + @.L206 + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 24 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l 
%v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + @.L207 + %v =l loadl %ptr + jnz %v, @.L208, @.L209 + @.L208 + %ptr =l add %ptr, -8 + jmp @.L207 + @.L209 + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 24 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 104 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 
+ %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 
8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + @.L210 + %v =l loadl %ptr + jnz %v, @.L211, @.L212 + @.L211 + %ptr =l add %ptr, -8 + jmp @.L210 + @.L212 + %ptr =l add %ptr, 16 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 24 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl 
%ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + @.L213 + %v =l loadl %ptr + jnz %v, @.L214, @.L215 + @.L214 + %ptr =l add %ptr, -8 + jmp @.L213 + @.L215 + %ptr =l add %ptr, 24 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L198 + @.L200 + %v =l loadl %ptr + %v =l add %v, 10 + storel %v, %ptr + %ptr =l add %ptr, -8 + %v =l loadl %ptr + %v =l add %v, 10 + storel %v, %ptr + %ptr =l add %ptr, -8 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + @.L216 + %v =l loadl %ptr + jnz %v, @.L217, @.L218 + @.L217 + %ptr =l add %ptr, 8 + jmp @.L216 + @.L218 + %ptr =l add %ptr, -8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + @.L219 + %v =l loadl %ptr + jnz %v, @.L220, @.L221 + @.L220 + %ptr =l add %ptr, -8 + jmp @.L219 + @.L221 + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 24 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add 
%ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + @.L222 + %v =l loadl %ptr + jnz %v, @.L223, @.L224 + @.L223 + %ptr =l add %ptr, -8 + jmp @.L222 + @.L224 + %ptr =l add %ptr, 32 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L195 + @.L197 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -8 + %v =l loadl %ptr + %v =l add %v, -2 + storel %v, %ptr + @.L225 + %v =l loadl %ptr + jnz %v, @.L226, @.L227 + @.L226 + @.L228 + %v =l loadl %ptr + jnz %v, @.L229, @.L230 + @.L229 + %ptr =l add %ptr, 8 + jmp @.L228 + @.L230 + %ptr =l add %ptr, -16 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %v =l loadl %ptr + %r =l call $putchar(l %v) + @.L231 + %v =l loadl %ptr + jnz %v, @.L232, @.L233 + @.L232 + %ptr =l add %ptr, -8 + jmp @.L231 + @.L233 + %ptr =l add %ptr, 8 + %ptr =l add 
%ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 24 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l 
add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + @.L234 + %v =l loadl %ptr + jnz %v, @.L235, @.L236 + @.L235 + %ptr =l add %ptr, -8 + jmp @.L234 + @.L236 + %ptr =l add %ptr, 8 + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 24 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 104 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add 
%ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + @.L237 + %v =l loadl %ptr + jnz %v, @.L238, @.L239 + @.L238 + %ptr =l add %ptr, -8 + jmp @.L237 + @.L239 + %ptr =l add %ptr, 16 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 24 + %v =l loadl %ptr + %r =l call 
$putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + @.L240 + %v =l loadl %ptr + jnz %v, @.L241, @.L242 + @.L241 + %ptr =l add %ptr, -8 + jmp @.L240 + @.L242 + %ptr =l add 
%ptr, 24 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L225 + @.L227 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + @.L243 + %v =l loadl %ptr + jnz %v, @.L244, @.L245 + @.L244 + %ptr =l add %ptr, 8 + jmp @.L243 + @.L245 + %ptr =l add %ptr, -8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + @.L246 + %v =l loadl %ptr + jnz %v, @.L247, @.L248 + @.L247 + %ptr =l add %ptr, -8 + jmp @.L246 + @.L248 + %ptr =l add %ptr, 8 + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 24 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 16 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l 
loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, -32 + %v =l loadl %ptr + %r =l call $putchar(l %v) + @.L249 + %v =l loadl %ptr + jnz %v, @.L250, @.L251 + @.L250 + %ptr =l add %ptr, 8 + jmp @.L249 + @.L251 + %ptr =l add %ptr, -16 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %v =l loadl %ptr + %r =l call $putchar(l %v) + @.L252 + %v =l loadl %ptr + jnz %v, @.L253, @.L254 + @.L253 + %ptr =l add %ptr, -8 + jmp @.L252 + @.L254 + %ptr =l add %ptr, 8 + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 24 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 16 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add 
%ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + @.L255 + %v =l loadl %ptr + jnz %v, @.L256, @.L257 + @.L256 + %ptr =l add %ptr, -8 + jmp @.L255 + @.L257 + %ptr =l add %ptr, 8 + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 24 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 16 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 
8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 104 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add 
%ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + @.L258 + %v =l loadl %ptr + jnz %v, @.L259, @.L260 + @.L259 + %ptr =l add %ptr, 8 + jmp @.L258 + @.L260 + %ptr =l add %ptr, -8 + %ptr =l add %ptr, -24 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, -16 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, -24 + %v =l loadl %ptr + %r =l call $putchar(l %v) + @.L261 + %v =l loadl %ptr + jnz %v, @.L262, @.L263 + @.L262 + %ptr =l add %ptr, -8 + jmp @.L261 + @.L263 + %ptr =l add %ptr, 32 + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + 
%v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %r =l call $putchar(l %v) + @end + ret 0 +} diff --git a/src/qbe/test/_bfmandel.ssa b/src/qbe/test/_bfmandel.ssa new file mode 100644 index 00000000..a5cf211b --- /dev/null +++ b/src/qbe/test/_bfmandel.ssa @@ -0,0 +1,9079 @@ +export +function w $main() { + @start + %ptr =l alloc16 4096 + %r =l call $memset(l %ptr, l 0, l 4096) + %v =l loadl %ptr + %v =l add %v, 13 + storel %v, %ptr + @.L0 + %v =l loadl %ptr + jnz %v, @.L1, @.L2 + @.L1 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, 2 + storel %v, %ptr + %ptr =l add %ptr, 24 + %v =l loadl %ptr + %v =l add %v, 5 + storel %v, %ptr + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, 2 + storel %v, %ptr + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -48 + jmp @.L0 + @.L2 + %ptr =l add %ptr, 40 + %v =l loadl %ptr + %v =l add %v, 6 + storel %v, %ptr + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, -3 + storel %v, %ptr + %ptr =l add %ptr, 80 + %v =l loadl %ptr + %v =l add %v, 15 + storel %v, %ptr + @.L3 + %v =l loadl %ptr + jnz %v, @.L4, @.L5 + @.L4 + @.L6 + %v =l 
loadl %ptr + jnz %v, @.L7, @.L8 + @.L7 + %ptr =l add %ptr, 72 + jmp @.L6 + @.L8 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + @.L9 + %v =l loadl %ptr + jnz %v, @.L10, @.L11 + @.L10 + %ptr =l add %ptr, -72 + jmp @.L9 + @.L11 + %ptr =l add %ptr, 72 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L3 + @.L5 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + @.L12 + %v =l loadl %ptr + jnz %v, @.L13, @.L14 + @.L13 + %ptr =l add %ptr, 64 + @.L15 + %v =l loadl %ptr + jnz %v, @.L16, @.L17 + @.L16 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L15 + @.L17 + %ptr =l add %ptr, 8 + jmp @.L12 + @.L14 + %ptr =l add %ptr, -72 + @.L18 + %v =l loadl %ptr + jnz %v, @.L19, @.L20 + @.L19 + %ptr =l add %ptr, -72 + jmp @.L18 + @.L20 + %ptr =l add %ptr, 64 + @.L21 + %v =l loadl %ptr + jnz %v, @.L22, @.L23 + @.L22 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L21 + @.L23 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -56 + %v =l loadl %ptr + %v =l add %v, 5 + storel %v, %ptr + @.L24 + %v =l loadl %ptr + jnz %v, @.L25, @.L26 + @.L25 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + @.L27 + %v =l loadl %ptr + jnz %v, @.L28, @.L29 + @.L28 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 72 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -72 + jmp @.L27 + @.L29 + %ptr =l add %ptr, 72 + jmp @.L24 + @.L26 + %ptr =l add %ptr, 56 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 208 + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -136 + @.L30 + %v =l loadl %ptr + jnz %v, @.L31, @.L32 + @.L31 + %ptr =l add %ptr, -72 + jmp @.L30 + @.L32 + %ptr =l add %ptr, 24 + @.L33 + %v =l loadl %ptr + jnz %v, @.L34, @.L35 + @.L34 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L33 + @.L35 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + @.L36 + %v =l loadl %ptr 
+ jnz %v, @.L37, @.L38 + @.L37 + %ptr =l add %ptr, 48 + @.L39 + %v =l loadl %ptr + jnz %v, @.L40, @.L41 + @.L40 + %ptr =l add %ptr, 56 + @.L42 + %v =l loadl %ptr + jnz %v, @.L43, @.L44 + @.L43 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L42 + @.L44 + %ptr =l add %ptr, 16 + jmp @.L39 + @.L41 + %ptr =l add %ptr, -72 + @.L45 + %v =l loadl %ptr + jnz %v, @.L46, @.L47 + @.L46 + %ptr =l add %ptr, -72 + jmp @.L45 + @.L47 + %ptr =l add %ptr, 16 + %ptr =l add %ptr, 40 + @.L48 + %v =l loadl %ptr + jnz %v, @.L49, @.L50 + @.L49 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L48 + @.L50 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -48 + %v =l loadl %ptr + %v =l add %v, 4 + storel %v, %ptr + @.L51 + %v =l loadl %ptr + jnz %v, @.L52, @.L53 + @.L52 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + @.L54 + %v =l loadl %ptr + jnz %v, @.L55, @.L56 + @.L55 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 72 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -72 + jmp @.L54 + @.L56 + %ptr =l add %ptr, 72 + jmp @.L51 + @.L53 + %ptr =l add %ptr, 48 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -48 + %v =l loadl %ptr + %v =l add %v, 7 + storel %v, %ptr + @.L57 + %v =l loadl %ptr + jnz %v, @.L58, @.L59 + @.L58 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + @.L60 + %v =l loadl %ptr + jnz %v, @.L61, @.L62 + @.L61 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 24 + %ptr =l add %ptr, 48 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -72 + jmp @.L60 + @.L62 + %ptr =l add %ptr, 72 + jmp @.L57 + @.L59 + %ptr =l add %ptr, 48 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -128 + @.L63 + %v =l loadl %ptr + jnz %v, @.L64, @.L65 + @.L64 + %ptr =l add %ptr, -72 + jmp @.L63 + @.L65 + %ptr =l add %ptr, 24 + @.L66 + %v =l loadl %ptr + jnz %v, @.L67, @.L68 + 
@.L67 + @.L69 + %v =l loadl %ptr + jnz %v, @.L70, @.L71 + @.L70 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L69 + @.L71 + %ptr =l add %ptr, 48 + @.L72 + %v =l loadl %ptr + jnz %v, @.L73, @.L74 + @.L73 + %ptr =l add %ptr, 40 + %ptr =l add %ptr, 16 + @.L75 + %v =l loadl %ptr + jnz %v, @.L76, @.L77 + @.L76 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -48 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 48 + jmp @.L75 + @.L77 + %ptr =l add %ptr, -48 + @.L78 + %v =l loadl %ptr + jnz %v, @.L79, @.L80 + @.L79 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 48 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -16 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -24 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -8 + jmp @.L78 + @.L80 + %ptr =l add %ptr, 64 + jmp @.L72 + @.L74 + %ptr =l add %ptr, -72 + @.L81 + %v =l loadl %ptr + jnz %v, @.L82, @.L83 + @.L82 + %ptr =l add %ptr, -72 + jmp @.L81 + @.L83 + %ptr =l add %ptr, 72 + @.L84 + %v =l loadl %ptr + jnz %v, @.L85, @.L86 + @.L85 + %ptr =l add %ptr, 64 + @.L87 + %v =l loadl %ptr + jnz %v, @.L88, @.L89 + @.L88 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -56 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 56 + jmp @.L87 + @.L89 + %ptr =l add %ptr, -56 + @.L90 + %v =l loadl %ptr + jnz %v, @.L91, @.L92 + @.L91 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 56 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -16 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -24 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -16 + jmp @.L90 + @.L92 + %ptr =l add %ptr, 64 + jmp @.L84 + @.L86 + %ptr =l add %ptr, -72 + @.L93 + %v =l loadl %ptr + jnz %v, @.L94, @.L95 + @.L94 + %ptr =l add %ptr, -56 + 
%ptr =l add %ptr, -16 + jmp @.L93 + @.L95 + %ptr =l add %ptr, 56 + @.L96 + %v =l loadl %ptr + jnz %v, @.L97, @.L98 + @.L97 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -56 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 56 + jmp @.L96 + @.L98 + %ptr =l add %ptr, -56 + @.L99 + %v =l loadl %ptr + jnz %v, @.L100, @.L101 + @.L100 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 56 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -16 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -40 + jmp @.L99 + @.L101 + %ptr =l add %ptr, 72 + %v =l loadl %ptr + %v =l add %v, 15 + storel %v, %ptr + @.L102 + %v =l loadl %ptr + jnz %v, @.L103, @.L104 + @.L103 + @.L105 + %v =l loadl %ptr + jnz %v, @.L106, @.L107 + @.L106 + %ptr =l add %ptr, 72 + jmp @.L105 + @.L107 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 8 + @.L108 + %v =l loadl %ptr + jnz %v, @.L109, @.L110 + @.L109 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L108 + @.L110 + %ptr =l add %ptr, 8 + @.L111 + %v =l loadl %ptr + jnz %v, @.L112, @.L113 + @.L112 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L111 + @.L113 + %ptr =l add %ptr, 8 + @.L114 + %v =l loadl %ptr + jnz %v, @.L115, @.L116 + @.L115 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L114 + @.L116 + %ptr =l add %ptr, 8 + @.L117 + %v =l loadl %ptr + jnz %v, @.L118, @.L119 + @.L118 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L117 + @.L119 + %ptr =l add %ptr, 8 + @.L120 + %v =l loadl %ptr + jnz %v, @.L121, @.L122 + @.L121 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L120 + @.L122 + %ptr =l add %ptr, 8 + @.L123 + %v =l loadl %ptr + jnz %v, @.L124, @.L125 + @.L124 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L123 + @.L125 + %ptr =l add %ptr, 8 + @.L126 + %v =l loadl %ptr + jnz %v, @.L127, @.L128 + 
@.L127 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L126 + @.L128 + %ptr =l add %ptr, 8 + @.L129 + %v =l loadl %ptr + jnz %v, @.L130, @.L131 + @.L130 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L129 + @.L131 + %ptr =l add %ptr, 8 + @.L132 + %v =l loadl %ptr + jnz %v, @.L133, @.L134 + @.L133 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L132 + @.L134 + %ptr =l add %ptr, -72 + @.L135 + %v =l loadl %ptr + jnz %v, @.L136, @.L137 + @.L136 + %ptr =l add %ptr, -72 + jmp @.L135 + @.L137 + %ptr =l add %ptr, 72 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L102 + @.L104 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + @.L138 + %v =l loadl %ptr + jnz %v, @.L139, @.L140 + @.L139 + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 64 + jmp @.L138 + @.L140 + %ptr =l add %ptr, -72 + @.L141 + %v =l loadl %ptr + jnz %v, @.L142, @.L143 + @.L142 + %ptr =l add %ptr, -72 + jmp @.L141 + @.L143 + %ptr =l add %ptr, 72 + @.L144 + %v =l loadl %ptr + jnz %v, @.L145, @.L146 + @.L145 + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 32 + @.L147 + %v =l loadl %ptr + jnz %v, @.L148, @.L149 + @.L148 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -32 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 32 + jmp @.L147 + @.L149 + %ptr =l add %ptr, -32 + @.L150 + %v =l loadl %ptr + jnz %v, @.L151, @.L152 + @.L151 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 32 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -40 + @.L153 + %v =l loadl %ptr + jnz %v, @.L154, @.L155 + @.L154 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 16 + @.L156 + %v =l loadl %ptr + jnz %v, @.L157, @.L158 + @.L157 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -16 + %v =l loadl %ptr + %v =l add 
%v, 1 + storel %v, %ptr + %ptr =l add %ptr, 16 + jmp @.L156 + @.L158 + %ptr =l add %ptr, -16 + @.L159 + %v =l loadl %ptr + jnz %v, @.L160, @.L161 + @.L160 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 16 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 16 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -32 + jmp @.L159 + @.L161 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 72 + jmp @.L153 + @.L155 + %ptr =l add %ptr, -64 + @.L162 + %v =l loadl %ptr + jnz %v, @.L163, @.L164 + @.L163 + %ptr =l add %ptr, -72 + jmp @.L162 + @.L164 + jmp @.L150 + @.L152 + %ptr =l add %ptr, 72 + @.L165 + %v =l loadl %ptr + jnz %v, @.L166, @.L167 + @.L166 + %ptr =l add %ptr, 72 + jmp @.L165 + @.L167 + %ptr =l add %ptr, -56 + %ptr =l add %ptr, -16 + @.L168 + %v =l loadl %ptr + jnz %v, @.L169, @.L170 + @.L169 + %ptr =l add %ptr, 8 + @.L171 + %v =l loadl %ptr + jnz %v, @.L172, @.L173 + @.L172 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 72 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -72 + jmp @.L171 + @.L173 + %ptr =l add %ptr, -80 + jmp @.L168 + @.L170 + %ptr =l add %ptr, 8 + @.L174 + %v =l loadl %ptr + jnz %v, @.L175, @.L176 + @.L175 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 72 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -72 + jmp @.L174 + @.L176 + %ptr =l add %ptr, -8 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 64 + jmp @.L144 + @.L146 + %ptr =l add %ptr, -72 + @.L177 + %v =l loadl %ptr + jnz %v, @.L178, @.L179 + @.L178 + %ptr =l add %ptr, 8 + @.L180 + %v =l loadl %ptr + jnz %v, @.L181, @.L182 + @.L181 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L180 + @.L182 + %ptr =l add %ptr, -8 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 32 + @.L183 + %v =l loadl %ptr + jnz %v, @.L184, 
@.L185 + @.L184 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -32 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 8 + @.L186 + %v =l loadl %ptr + jnz %v, @.L187, @.L188 + @.L187 + %ptr =l add %ptr, -8 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -48 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 48 + jmp @.L186 + @.L188 + %ptr =l add %ptr, -8 + @.L189 + %v =l loadl %ptr + jnz %v, @.L190, @.L191 + @.L190 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -8 + jmp @.L189 + @.L191 + %ptr =l add %ptr, 32 + jmp @.L183 + @.L185 + %ptr =l add %ptr, -24 + @.L192 + %v =l loadl %ptr + jnz %v, @.L193, @.L194 + @.L193 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 24 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -24 + jmp @.L192 + @.L194 + %ptr =l add %ptr, -8 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -72 + jmp @.L177 + @.L179 + %ptr =l add %ptr, 40 + %ptr =l add %ptr, 32 + @.L195 + %v =l loadl %ptr + jnz %v, @.L196, @.L197 + @.L196 + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 64 + jmp @.L195 + @.L197 + %ptr =l add %ptr, -72 + @.L198 + %v =l loadl %ptr + jnz %v, @.L199, @.L200 + @.L199 + %ptr =l add %ptr, -72 + jmp @.L198 + @.L200 + %ptr =l add %ptr, 72 + @.L201 + %v =l loadl %ptr + jnz %v, @.L202, @.L203 + @.L202 + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 40 + @.L204 + %v =l loadl %ptr + jnz %v, @.L205, @.L206 + @.L205 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -40 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 40 + jmp @.L204 + 
@.L206 + %ptr =l add %ptr, -40 + @.L207 + %v =l loadl %ptr + jnz %v, @.L208, @.L209 + @.L208 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 40 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -48 + @.L210 + %v =l loadl %ptr + jnz %v, @.L211, @.L212 + @.L211 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 24 + @.L213 + %v =l loadl %ptr + jnz %v, @.L214, @.L215 + @.L214 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -24 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 24 + jmp @.L213 + @.L215 + %ptr =l add %ptr, -24 + @.L216 + %v =l loadl %ptr + jnz %v, @.L217, @.L218 + @.L217 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 24 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -32 + jmp @.L216 + @.L218 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 72 + jmp @.L210 + @.L212 + %ptr =l add %ptr, -64 + @.L219 + %v =l loadl %ptr + jnz %v, @.L220, @.L221 + @.L220 + %ptr =l add %ptr, -72 + jmp @.L219 + @.L221 + jmp @.L207 + @.L209 + %ptr =l add %ptr, 72 + @.L222 + %v =l loadl %ptr + jnz %v, @.L223, @.L224 + @.L223 + %ptr =l add %ptr, 16 + %ptr =l add %ptr, 56 + jmp @.L222 + @.L224 + %ptr =l add %ptr, -72 + @.L225 + %v =l loadl %ptr + jnz %v, @.L226, @.L227 + @.L226 + %ptr =l add %ptr, 16 + @.L228 + %v =l loadl %ptr + jnz %v, @.L229, @.L230 + @.L229 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 72 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -72 + jmp @.L228 + @.L230 + %ptr =l add %ptr, -88 + jmp @.L225 + @.L227 + %ptr =l add %ptr, 16 + @.L231 + %v =l loadl %ptr + jnz %v, @.L232, @.L233 + @.L232 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 72 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr 
+ %ptr =l add %ptr, -72 + jmp @.L231 + @.L233 + %ptr =l add %ptr, -16 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 64 + jmp @.L201 + @.L203 + %ptr =l add %ptr, -72 + @.L234 + %v =l loadl %ptr + jnz %v, @.L235, @.L236 + @.L235 + %ptr =l add %ptr, 8 + @.L237 + %v =l loadl %ptr + jnz %v, @.L238, @.L239 + @.L238 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L237 + @.L239 + %ptr =l add %ptr, -8 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 32 + @.L240 + %v =l loadl %ptr + jnz %v, @.L241, @.L242 + @.L241 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -32 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 8 + @.L243 + %v =l loadl %ptr + jnz %v, @.L244, @.L245 + @.L244 + %ptr =l add %ptr, -8 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -48 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 48 + jmp @.L243 + @.L245 + %ptr =l add %ptr, -8 + @.L246 + %v =l loadl %ptr + jnz %v, @.L247, @.L248 + @.L247 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -8 + jmp @.L246 + @.L248 + %ptr =l add %ptr, 32 + jmp @.L240 + @.L242 + %ptr =l add %ptr, -24 + @.L249 + %v =l loadl %ptr + jnz %v, @.L250, @.L251 + @.L250 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 24 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -16 + %ptr =l add %ptr, -8 + jmp @.L249 + @.L251 + %ptr =l add %ptr, -8 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -72 + jmp @.L234 + @.L236 + %ptr =l add %ptr, 72 + @.L252 + %v =l loadl %ptr + jnz %v, @.L253, @.L254 + @.L253 + %ptr =l add %ptr, 32 + @.L255 + %v =l loadl %ptr + jnz %v, @.L256, @.L257 + @.L256 + %v =l loadl %ptr + %v =l 
add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -288 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 104 + %ptr =l add %ptr, 184 + jmp @.L255 + @.L257 + %ptr =l add %ptr, 40 + jmp @.L252 + @.L254 + %ptr =l add %ptr, -72 + @.L258 + %v =l loadl %ptr + jnz %v, @.L259, @.L260 + @.L259 + %ptr =l add %ptr, -72 + jmp @.L258 + @.L260 + %ptr =l add %ptr, 72 + %v =l loadl %ptr + %v =l add %v, 15 + storel %v, %ptr + @.L261 + %v =l loadl %ptr + jnz %v, @.L262, @.L263 + @.L262 + @.L264 + %v =l loadl %ptr + jnz %v, @.L265, @.L266 + @.L265 + %ptr =l add %ptr, 32 + %ptr =l add %ptr, 40 + jmp @.L264 + @.L266 + %ptr =l add %ptr, -72 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -72 + @.L267 + %v =l loadl %ptr + jnz %v, @.L268, @.L269 + @.L268 + %ptr =l add %ptr, -72 + jmp @.L267 + @.L269 + %ptr =l add %ptr, 72 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L261 + @.L263 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 168 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -24 + @.L270 + %v =l loadl %ptr + jnz %v, @.L271, @.L272 + @.L271 + %ptr =l add %ptr, -48 + %ptr =l add %ptr, -24 + jmp @.L270 + @.L272 + %ptr =l add %ptr, 72 + @.L273 + %v =l loadl %ptr + jnz %v, @.L274, @.L275 + @.L274 + %ptr =l add %ptr, 24 + @.L276 + %v =l loadl %ptr + jnz %v, @.L277, @.L278 + @.L277 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -24 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 24 + jmp @.L276 + @.L278 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -24 + @.L279 + %v =l loadl %ptr + jnz %v, @.L280, @.L281 + @.L280 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 24 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 8 + @.L282 + %v =l loadl %ptr + jnz %v, @.L283, @.L284 + @.L283 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + 
%ptr =l add %ptr, -32 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 32 + jmp @.L282 + @.L284 + %ptr =l add %ptr, -32 + @.L285 + %v =l loadl %ptr + jnz %v, @.L286, @.L287 + @.L286 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 32 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -104 + @.L288 + %v =l loadl %ptr + jnz %v, @.L289, @.L290 + @.L289 + %ptr =l add %ptr, -40 + %ptr =l add %ptr, -32 + jmp @.L288 + @.L290 + %ptr =l add %ptr, 32 + @.L291 + %v =l loadl %ptr + jnz %v, @.L292, @.L293 + @.L292 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L291 + @.L293 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 40 + @.L294 + %v =l loadl %ptr + jnz %v, @.L295, @.L296 + @.L295 + %ptr =l add %ptr, 72 + jmp @.L294 + @.L296 + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -8 + jmp @.L285 + @.L287 + jmp @.L279 + @.L281 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 32 + @.L297 + %v =l loadl %ptr + jnz %v, @.L298, @.L299 + @.L298 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -32 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 32 + jmp @.L297 + @.L299 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -32 + @.L300 + %v =l loadl %ptr + jnz %v, @.L301, @.L302 + @.L301 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 32 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -8 + @.L303 + %v =l loadl %ptr + jnz %v, @.L304, @.L305 + @.L304 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -24 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 24 + jmp @.L303 + @.L305 + %ptr =l add %ptr, -24 + @.L306 + %v =l loadl %ptr + jnz %v, @.L307, @.L308 + @.L307 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + 
%ptr =l add %ptr, 8 + %ptr =l add %ptr, 16 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -96 + @.L309 + %v =l loadl %ptr + jnz %v, @.L310, @.L311 + @.L310 + %ptr =l add %ptr, -72 + jmp @.L309 + @.L311 + %ptr =l add %ptr, 24 + @.L312 + %v =l loadl %ptr + jnz %v, @.L313, @.L314 + @.L313 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L312 + @.L314 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 48 + @.L315 + %v =l loadl %ptr + jnz %v, @.L316, @.L317 + @.L316 + %ptr =l add %ptr, 72 + jmp @.L315 + @.L317 + %ptr =l add %ptr, 8 + @.L318 + %v =l loadl %ptr + jnz %v, @.L319, @.L320 + @.L319 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L318 + @.L320 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -8 + jmp @.L306 + @.L308 + jmp @.L300 + @.L302 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 8 + @.L321 + %v =l loadl %ptr + jnz %v, @.L322, @.L323 + @.L322 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -8 + @.L324 + %v =l loadl %ptr + jnz %v, @.L325, @.L326 + @.L325 + %ptr =l add %ptr, 72 + jmp @.L324 + @.L326 + %ptr =l add %ptr, -48 + %ptr =l add %ptr, -16 + jmp @.L321 + @.L323 + %ptr =l add %ptr, 64 + jmp @.L273 + @.L275 + %ptr =l add %ptr, -72 + @.L327 + %v =l loadl %ptr + jnz %v, @.L328, @.L329 + @.L328 + %ptr =l add %ptr, -72 + jmp @.L327 + @.L329 + %ptr =l add %ptr, -56 + @.L330 + %v =l loadl %ptr + jnz %v, @.L331, @.L332 + @.L331 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 24 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -32 + jmp @.L330 + @.L332 + %ptr =l add %ptr, 72 + %v =l loadl %ptr + %v =l add %v, 19 + storel %v, %ptr + %v =l loadl %ptr + %v =l add %v, 7 + storel %v, %ptr + %ptr =l add %ptr, 16 + @.L333 + %v =l loadl %ptr + jnz %v, @.L334, @.L335 + @.L334 + 
%v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -32 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 32 + jmp @.L333 + @.L335 + %ptr =l add %ptr, -32 + @.L336 + %v =l loadl %ptr + jnz %v, @.L337, @.L338 + @.L337 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 32 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -16 + @.L339 + %v =l loadl %ptr + jnz %v, @.L340, @.L341 + @.L340 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L339 + @.L341 + %ptr =l add %ptr, -16 + jmp @.L336 + @.L338 + %ptr =l add %ptr, 16 + @.L342 + %v =l loadl %ptr + jnz %v, @.L343, @.L344 + @.L343 + %ptr =l add %ptr, -56 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -8 + @.L345 + %v =l loadl %ptr + jnz %v, @.L346, @.L347 + @.L346 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -8 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 32 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -16 + @.L348 + %v =l loadl %ptr + jnz %v, @.L349, @.L350 + @.L349 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L348 + @.L350 + jmp @.L345 + @.L347 + %ptr =l add %ptr, 8 + @.L351 + %v =l loadl %ptr + jnz %v, @.L352, @.L353 + @.L352 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -16 + @.L354 + %v =l loadl %ptr + jnz %v, @.L355, @.L356 + @.L355 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 24 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -32 + jmp @.L354 + @.L356 + %ptr =l add %ptr, 24 + jmp @.L351 + @.L353 + %ptr =l add %ptr, 104 + @.L357 + %v =l loadl %ptr + jnz %v, @.L358, @.L359 + @.L358 + %ptr =l add %ptr, 16 + @.L360 + %v =l loadl %ptr + jnz %v, @.L361, @.L362 + @.L361 + %v =l loadl %ptr + %v =l add %v, -1 + 
storel %v, %ptr + jmp @.L360 + @.L362 + %ptr =l add %ptr, 8 + @.L363 + %v =l loadl %ptr + jnz %v, @.L364, @.L365 + @.L364 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L363 + @.L365 + %ptr =l add %ptr, 8 + @.L366 + %v =l loadl %ptr + jnz %v, @.L367, @.L368 + @.L367 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L366 + @.L368 + %ptr =l add %ptr, 40 + jmp @.L357 + @.L359 + %ptr =l add %ptr, -72 + @.L369 + %v =l loadl %ptr + jnz %v, @.L370, @.L371 + @.L370 + %ptr =l add %ptr, -72 + jmp @.L369 + @.L371 + %ptr =l add %ptr, 24 + @.L372 + %v =l loadl %ptr + jnz %v, @.L373, @.L374 + @.L373 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L372 + @.L374 + %ptr =l add %ptr, 48 + @.L375 + %v =l loadl %ptr + jnz %v, @.L376, @.L377 + @.L376 + %ptr =l add %ptr, 40 + @.L378 + %v =l loadl %ptr + jnz %v, @.L379, @.L380 + @.L379 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -32 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 32 + jmp @.L378 + @.L380 + %ptr =l add %ptr, -32 + @.L381 + %v =l loadl %ptr + jnz %v, @.L382, @.L383 + @.L382 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 32 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -24 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -8 + jmp @.L381 + @.L383 + %ptr =l add %ptr, 64 + jmp @.L375 + @.L377 + %ptr =l add %ptr, -72 + @.L384 + %v =l loadl %ptr + jnz %v, @.L385, @.L386 + @.L385 + %ptr =l add %ptr, -72 + jmp @.L384 + @.L386 + %ptr =l add %ptr, 72 + @.L387 + %v =l loadl %ptr + jnz %v, @.L388, @.L389 + @.L388 + %ptr =l add %ptr, 16 + @.L390 + %v =l loadl %ptr + jnz %v, @.L391, @.L392 + @.L391 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -64 + %ptr =l add %ptr, -8 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 72 + jmp @.L390 + @.L392 + %ptr =l add %ptr, 56 + jmp @.L387 + @.L389 + %ptr 
=l add %ptr, -72 + @.L393 + %v =l loadl %ptr + jnz %v, @.L394, @.L395 + @.L394 + %ptr =l add %ptr, -72 + jmp @.L393 + @.L395 + %ptr =l add %ptr, 72 + %v =l loadl %ptr + %v =l add %v, 15 + storel %v, %ptr + @.L396 + %v =l loadl %ptr + jnz %v, @.L397, @.L398 + @.L397 + @.L399 + %v =l loadl %ptr + jnz %v, @.L400, @.L401 + @.L400 + %ptr =l add %ptr, 72 + jmp @.L399 + @.L401 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 8 + @.L402 + %v =l loadl %ptr + jnz %v, @.L403, @.L404 + @.L403 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L402 + @.L404 + %ptr =l add %ptr, 8 + @.L405 + %v =l loadl %ptr + jnz %v, @.L406, @.L407 + @.L406 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L405 + @.L407 + %ptr =l add %ptr, 8 + @.L408 + %v =l loadl %ptr + jnz %v, @.L409, @.L410 + @.L409 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L408 + @.L410 + %ptr =l add %ptr, 8 + @.L411 + %v =l loadl %ptr + jnz %v, @.L412, @.L413 + @.L412 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L411 + @.L413 + %ptr =l add %ptr, 8 + @.L414 + %v =l loadl %ptr + jnz %v, @.L415, @.L416 + @.L415 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L414 + @.L416 + %ptr =l add %ptr, 8 + @.L417 + %v =l loadl %ptr + jnz %v, @.L418, @.L419 + @.L418 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L417 + @.L419 + %ptr =l add %ptr, 8 + @.L420 + %v =l loadl %ptr + jnz %v, @.L421, @.L422 + @.L421 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L420 + @.L422 + %ptr =l add %ptr, 8 + @.L423 + %v =l loadl %ptr + jnz %v, @.L424, @.L425 + @.L424 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L423 + @.L425 + %ptr =l add %ptr, 8 + @.L426 + %v =l loadl %ptr + jnz %v, @.L427, @.L428 + @.L427 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L426 + @.L428 + %ptr =l add %ptr, -72 + @.L429 + %v =l loadl %ptr + jnz %v, @.L430, @.L431 + @.L430 + %ptr =l add %ptr, -72 + 
jmp @.L429 + @.L431 + %ptr =l add %ptr, 72 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L396 + @.L398 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + @.L432 + %v =l loadl %ptr + jnz %v, @.L433, @.L434 + @.L433 + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 64 + jmp @.L432 + @.L434 + %ptr =l add %ptr, -24 + %ptr =l add %ptr, -48 + @.L435 + %v =l loadl %ptr + jnz %v, @.L436, @.L437 + @.L436 + %ptr =l add %ptr, -72 + jmp @.L435 + @.L437 + %ptr =l add %ptr, 72 + @.L438 + %v =l loadl %ptr + jnz %v, @.L439, @.L440 + @.L439 + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 40 + @.L441 + %v =l loadl %ptr + jnz %v, @.L442, @.L443 + @.L442 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -40 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 40 + jmp @.L441 + @.L443 + %ptr =l add %ptr, -40 + @.L444 + %v =l loadl %ptr + jnz %v, @.L445, @.L446 + @.L445 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 40 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -48 + @.L447 + %v =l loadl %ptr + jnz %v, @.L448, @.L449 + @.L448 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 16 + @.L450 + %v =l loadl %ptr + jnz %v, @.L451, @.L452 + @.L451 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -16 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 16 + jmp @.L450 + @.L452 + %ptr =l add %ptr, -8 + %ptr =l add %ptr, -8 + @.L453 + %v =l loadl %ptr + jnz %v, @.L454, @.L455 + @.L454 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 16 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -24 + jmp @.L453 + @.L455 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr 
=l add %ptr, 72 + jmp @.L447 + @.L449 + %ptr =l add %ptr, -64 + @.L456 + %v =l loadl %ptr + jnz %v, @.L457, @.L458 + @.L457 + %ptr =l add %ptr, -72 + jmp @.L456 + @.L458 + jmp @.L444 + @.L446 + %ptr =l add %ptr, 72 + @.L459 + %v =l loadl %ptr + jnz %v, @.L460, @.L461 + @.L460 + %ptr =l add %ptr, 72 + jmp @.L459 + @.L461 + %ptr =l add %ptr, -72 + @.L462 + %v =l loadl %ptr + jnz %v, @.L463, @.L464 + @.L463 + %ptr =l add %ptr, 8 + @.L465 + %v =l loadl %ptr + jnz %v, @.L466, @.L467 + @.L466 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 32 + %ptr =l add %ptr, 40 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -72 + jmp @.L465 + @.L467 + %ptr =l add %ptr, -80 + jmp @.L462 + @.L464 + %ptr =l add %ptr, 8 + @.L468 + %v =l loadl %ptr + jnz %v, @.L469, @.L470 + @.L469 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 72 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -72 + jmp @.L468 + @.L470 + %ptr =l add %ptr, -8 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 64 + jmp @.L438 + @.L440 + %ptr =l add %ptr, -72 + @.L471 + %v =l loadl %ptr + jnz %v, @.L472, @.L473 + @.L472 + %ptr =l add %ptr, 8 + @.L474 + %v =l loadl %ptr + jnz %v, @.L475, @.L476 + @.L475 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L474 + @.L476 + %ptr =l add %ptr, -8 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 24 + @.L477 + %v =l loadl %ptr + jnz %v, @.L478, @.L479 + @.L478 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -24 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 8 + @.L480 + %v =l loadl %ptr + jnz %v, @.L481, @.L482 + @.L481 + %ptr =l add %ptr, -8 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -56 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + 
%ptr =l add %ptr, 56 + jmp @.L480 + @.L482 + %ptr =l add %ptr, -8 + @.L483 + %v =l loadl %ptr + jnz %v, @.L484, @.L485 + @.L484 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -8 + jmp @.L483 + @.L485 + %ptr =l add %ptr, 24 + jmp @.L477 + @.L479 + %ptr =l add %ptr, -16 + @.L486 + %v =l loadl %ptr + jnz %v, @.L487, @.L488 + @.L487 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 16 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -16 + jmp @.L486 + @.L488 + %ptr =l add %ptr, -8 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -72 + jmp @.L471 + @.L473 + %ptr =l add %ptr, 72 + @.L489 + %v =l loadl %ptr + jnz %v, @.L490, @.L491 + @.L490 + %ptr =l add %ptr, 48 + @.L492 + %v =l loadl %ptr + jnz %v, @.L493, @.L494 + @.L493 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -8 + %ptr =l add %ptr, -32 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 40 + jmp @.L492 + @.L494 + %ptr =l add %ptr, -40 + @.L495 + %v =l loadl %ptr + jnz %v, @.L496, @.L497 + @.L496 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 40 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -32 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -8 + jmp @.L495 + @.L497 + %ptr =l add %ptr, 64 + jmp @.L489 + @.L491 + %ptr =l add %ptr, -72 + @.L498 + %v =l loadl %ptr + jnz %v, @.L499, @.L500 + @.L499 + %ptr =l add %ptr, -72 + jmp @.L498 + @.L500 + %ptr =l add %ptr, 72 + @.L501 + %v =l loadl %ptr + jnz %v, @.L502, @.L503 + @.L502 + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 64 + jmp @.L501 + @.L503 + %ptr =l add %ptr, -72 + @.L504 + %v =l loadl %ptr + jnz %v, @.L505, @.L506 + @.L505 + %ptr =l add %ptr, -72 + jmp @.L504 + @.L506 + %ptr =l add %ptr, 72 + 
@.L507 + %v =l loadl %ptr + jnz %v, @.L508, @.L509 + @.L508 + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 40 + @.L510 + %v =l loadl %ptr + jnz %v, @.L511, @.L512 + @.L511 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -40 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 40 + jmp @.L510 + @.L512 + %ptr =l add %ptr, -40 + @.L513 + %v =l loadl %ptr + jnz %v, @.L514, @.L515 + @.L514 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 40 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -48 + @.L516 + %v =l loadl %ptr + jnz %v, @.L517, @.L518 + @.L517 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 16 + @.L519 + %v =l loadl %ptr + jnz %v, @.L520, @.L521 + @.L520 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -16 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 16 + jmp @.L519 + @.L521 + %ptr =l add %ptr, -16 + @.L522 + %v =l loadl %ptr + jnz %v, @.L523, @.L524 + @.L523 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 16 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 16 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -32 + jmp @.L522 + @.L524 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 72 + jmp @.L516 + @.L518 + %ptr =l add %ptr, -64 + @.L525 + %v =l loadl %ptr + jnz %v, @.L526, @.L527 + @.L526 + %ptr =l add %ptr, -72 + jmp @.L525 + @.L527 + jmp @.L513 + @.L515 + %ptr =l add %ptr, 72 + @.L528 + %v =l loadl %ptr + jnz %v, @.L529, @.L530 + @.L529 + %ptr =l add %ptr, 72 + jmp @.L528 + @.L530 + %ptr =l add %ptr, -72 + @.L531 + %v =l loadl %ptr + jnz %v, @.L532, @.L533 + @.L532 + %ptr =l add %ptr, 8 + @.L534 + %v =l loadl %ptr + jnz %v, @.L535, @.L536 + @.L535 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add 
%ptr, 72 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -72 + jmp @.L534 + @.L536 + %ptr =l add %ptr, -80 + jmp @.L531 + @.L533 + %ptr =l add %ptr, 8 + @.L537 + %v =l loadl %ptr + jnz %v, @.L538, @.L539 + @.L538 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 72 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -72 + jmp @.L537 + @.L539 + %ptr =l add %ptr, -8 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 64 + jmp @.L507 + @.L509 + %ptr =l add %ptr, -72 + @.L540 + %v =l loadl %ptr + jnz %v, @.L541, @.L542 + @.L541 + %ptr =l add %ptr, 8 + @.L543 + %v =l loadl %ptr + jnz %v, @.L544, @.L545 + @.L544 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L543 + @.L545 + %ptr =l add %ptr, -8 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 32 + @.L546 + %v =l loadl %ptr + jnz %v, @.L547, @.L548 + @.L547 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -32 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 8 + @.L549 + %v =l loadl %ptr + jnz %v, @.L550, @.L551 + @.L550 + %ptr =l add %ptr, -8 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -48 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 48 + jmp @.L549 + @.L551 + %ptr =l add %ptr, -8 + @.L552 + %v =l loadl %ptr + jnz %v, @.L553, @.L554 + @.L553 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -8 + jmp @.L552 + @.L554 + %ptr =l add %ptr, 32 + jmp @.L546 + @.L548 + %ptr =l add %ptr, -24 + @.L555 + %v =l loadl %ptr + jnz %v, @.L556, @.L557 + @.L556 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 24 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add 
%ptr, -24 + jmp @.L555 + @.L557 + %ptr =l add %ptr, -8 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -72 + jmp @.L540 + @.L542 + %ptr =l add %ptr, 72 + @.L558 + %v =l loadl %ptr + jnz %v, @.L559, @.L560 + @.L559 + %ptr =l add %ptr, 32 + @.L561 + %v =l loadl %ptr + jnz %v, @.L562, @.L563 + @.L562 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -288 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 288 + jmp @.L561 + @.L563 + %ptr =l add %ptr, 40 + jmp @.L558 + @.L560 + %ptr =l add %ptr, -72 + @.L564 + %v =l loadl %ptr + jnz %v, @.L565, @.L566 + @.L565 + %ptr =l add %ptr, -72 + jmp @.L564 + @.L566 + %ptr =l add %ptr, 72 + @.L567 + %v =l loadl %ptr + jnz %v, @.L568, @.L569 + @.L568 + %ptr =l add %ptr, 24 + @.L570 + %v =l loadl %ptr + jnz %v, @.L571, @.L572 + @.L571 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -288 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 8 + %ptr =l add %ptr, 280 + jmp @.L570 + @.L572 + %ptr =l add %ptr, 48 + jmp @.L567 + @.L569 + %ptr =l add %ptr, -72 + @.L573 + %v =l loadl %ptr + jnz %v, @.L574, @.L575 + @.L574 + %ptr =l add %ptr, -72 + jmp @.L573 + @.L575 + %ptr =l add %ptr, 72 + %v =l loadl %ptr + %v =l add %v, 8 + storel %v, %ptr + %v =l loadl %ptr + %v =l add %v, 7 + storel %v, %ptr + @.L576 + %v =l loadl %ptr + jnz %v, @.L577, @.L578 + @.L577 + @.L579 + %v =l loadl %ptr + jnz %v, @.L580, @.L581 + @.L580 + %ptr =l add %ptr, 72 + jmp @.L579 + @.L581 + %ptr =l add %ptr, -72 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -72 + @.L582 + %v =l loadl %ptr + jnz %v, @.L583, @.L584 + @.L583 + %ptr =l add %ptr, -72 + jmp @.L582 + @.L584 + %ptr =l add %ptr, 72 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L576 + @.L578 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + @.L585 + %v =l loadl %ptr + jnz %v, @.L586, @.L587 + @.L586 + %ptr =l add %ptr, 64 + @.L588 
+ %v =l loadl %ptr + jnz %v, @.L589, @.L590 + @.L589 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -56 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 56 + jmp @.L588 + @.L590 + %ptr =l add %ptr, -56 + @.L591 + %v =l loadl %ptr + jnz %v, @.L592, @.L593 + @.L592 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 56 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -48 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -8 + jmp @.L591 + @.L593 + %ptr =l add %ptr, 64 + jmp @.L585 + @.L587 + %ptr =l add %ptr, -72 + @.L594 + %v =l loadl %ptr + jnz %v, @.L595, @.L596 + @.L595 + %ptr =l add %ptr, -72 + jmp @.L594 + @.L596 + %ptr =l add %ptr, 72 + @.L597 + %v =l loadl %ptr + jnz %v, @.L598, @.L599 + @.L598 + %ptr =l add %ptr, 48 + @.L600 + %v =l loadl %ptr + jnz %v, @.L601, @.L602 + @.L601 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L600 + @.L602 + %ptr =l add %ptr, 24 + jmp @.L597 + @.L599 + %ptr =l add %ptr, -72 + @.L603 + %v =l loadl %ptr + jnz %v, @.L604, @.L605 + @.L604 + %ptr =l add %ptr, -72 + jmp @.L603 + @.L605 + %ptr =l add %ptr, 32 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 8 + @.L606 + %v =l loadl %ptr + jnz %v, @.L607, @.L608 + @.L607 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -8 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -32 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 40 + jmp @.L606 + @.L608 + %ptr =l add %ptr, 8 + @.L609 + %v =l loadl %ptr + jnz %v, @.L610, @.L611 + @.L610 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -48 + @.L612 + %v =l loadl %ptr + jnz %v, @.L613, @.L614 + @.L613 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 40 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -8 + %v =l loadl 
%ptr + %v =l add %v, 2 + storel %v, %ptr + %ptr =l add %ptr, -32 + jmp @.L612 + @.L614 + %ptr =l add %ptr, 40 + @.L615 + %v =l loadl %ptr + jnz %v, @.L616, @.L617 + @.L616 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -8 + %ptr =l add %ptr, -32 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 40 + jmp @.L615 + @.L617 + %ptr =l add %ptr, -8 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 8 + jmp @.L609 + @.L611 + %ptr =l add %ptr, -8 + @.L618 + %v =l loadl %ptr + jnz %v, @.L619, @.L620 + @.L619 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -8 + jmp @.L618 + @.L620 + %ptr =l add %ptr, -40 + @.L621 + %v =l loadl %ptr + jnz %v, @.L622, @.L623 + @.L622 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 40 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -40 + jmp @.L621 + @.L623 + %ptr =l add %ptr, 48 + @.L624 + %v =l loadl %ptr + jnz %v, @.L625, @.L626 + @.L625 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L624 + @.L626 + %ptr =l add %ptr, -48 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 32 + @.L627 + %v =l loadl %ptr + jnz %v, @.L628, @.L629 + @.L628 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -32 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 32 + jmp @.L627 + @.L629 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -32 + @.L630 + %v =l loadl %ptr + jnz %v, @.L631, @.L632 + @.L631 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 32 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 40 + @.L633 + %v =l loadl %ptr + jnz %v, @.L634, @.L635 + @.L634 + %ptr =l add %ptr, 16 + 
@.L636 + %v =l loadl %ptr + jnz %v, @.L637, @.L638 + @.L637 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -16 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 16 + jmp @.L636 + @.L638 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -16 + @.L639 + %v =l loadl %ptr + jnz %v, @.L640, @.L641 + @.L640 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 16 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 8 + @.L642 + %v =l loadl %ptr + jnz %v, @.L643, @.L644 + @.L643 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -24 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 24 + jmp @.L642 + @.L644 + %ptr =l add %ptr, -24 + @.L645 + %v =l loadl %ptr + jnz %v, @.L646, @.L647 + @.L646 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 24 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -96 + @.L648 + %v =l loadl %ptr + jnz %v, @.L649, @.L650 + @.L649 + %ptr =l add %ptr, -72 + jmp @.L648 + @.L650 + %ptr =l add %ptr, 24 + @.L651 + %v =l loadl %ptr + jnz %v, @.L652, @.L653 + @.L652 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L651 + @.L653 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 48 + @.L654 + %v =l loadl %ptr + jnz %v, @.L655, @.L656 + @.L655 + %ptr =l add %ptr, 72 + jmp @.L654 + @.L656 + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -8 + jmp @.L645 + @.L647 + jmp @.L639 + @.L641 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 24 + @.L657 + %v =l loadl %ptr + jnz %v, @.L658, @.L659 + @.L658 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -24 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 24 + jmp @.L657 + @.L659 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, 
%ptr + %ptr =l add %ptr, -24 + @.L660 + %v =l loadl %ptr + jnz %v, @.L661, @.L662 + @.L661 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 24 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -8 + @.L663 + %v =l loadl %ptr + jnz %v, @.L664, @.L665 + @.L664 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -16 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 16 + jmp @.L663 + @.L665 + %ptr =l add %ptr, -16 + @.L666 + %v =l loadl %ptr + jnz %v, @.L667, @.L668 + @.L667 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 16 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -88 + @.L669 + %v =l loadl %ptr + jnz %v, @.L670, @.L671 + @.L670 + %ptr =l add %ptr, -40 + %ptr =l add %ptr, -32 + jmp @.L669 + @.L671 + %ptr =l add %ptr, 32 + @.L672 + %v =l loadl %ptr + jnz %v, @.L673, @.L674 + @.L673 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L672 + @.L674 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 40 + @.L675 + %v =l loadl %ptr + jnz %v, @.L676, @.L677 + @.L676 + %ptr =l add %ptr, 72 + jmp @.L675 + @.L677 + %ptr =l add %ptr, 8 + @.L678 + %v =l loadl %ptr + jnz %v, @.L679, @.L680 + @.L679 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L678 + @.L680 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -8 + jmp @.L666 + @.L668 + jmp @.L660 + @.L662 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 8 + @.L681 + %v =l loadl %ptr + jnz %v, @.L682, @.L683 + @.L682 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -8 + @.L684 + %v =l loadl %ptr + jnz %v, @.L685, @.L686 + @.L685 + %ptr =l add %ptr, 72 + jmp @.L684 + @.L686 + %ptr =l add %ptr, -64 + jmp @.L681 + @.L683 + %ptr =l add %ptr, 64 + jmp @.L633 + @.L635 + %ptr =l add %ptr, -72 + @.L687 + %v =l loadl %ptr + jnz %v, @.L688, @.L689 + 
@.L688 + %ptr =l add %ptr, -72 + jmp @.L687 + @.L689 + %ptr =l add %ptr, 32 + @.L690 + %v =l loadl %ptr + jnz %v, @.L691, @.L692 + @.L691 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -32 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 32 + jmp @.L690 + @.L692 + %ptr =l add %ptr, -32 + @.L693 + %v =l loadl %ptr + jnz %v, @.L694, @.L695 + @.L694 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 32 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 40 + @.L696 + %v =l loadl %ptr + jnz %v, @.L697, @.L698 + @.L697 + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 16 + @.L699 + %v =l loadl %ptr + jnz %v, @.L700, @.L701 + @.L700 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -16 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 16 + jmp @.L699 + @.L701 + %ptr =l add %ptr, -16 + @.L702 + %v =l loadl %ptr + jnz %v, @.L703, @.L704 + @.L703 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 16 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -16 + jmp @.L702 + @.L704 + %ptr =l add %ptr, 64 + jmp @.L696 + @.L698 + %ptr =l add %ptr, -40 + %ptr =l add %ptr, -24 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -8 + @.L705 + %v =l loadl %ptr + jnz %v, @.L706, @.L707 + @.L706 + %ptr =l add %ptr, 8 + @.L708 + %v =l loadl %ptr + jnz %v, @.L709, @.L710 + @.L709 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 40 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -32 + @.L711 + %v =l loadl %ptr + jnz %v, @.L712, @.L713 + @.L712 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 32 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -112 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add 
%ptr, 88 + @.L714 + %v =l loadl %ptr + jnz %v, @.L715, @.L716 + @.L715 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 24 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -24 + jmp @.L714 + @.L716 + %ptr =l add %ptr, -8 + jmp @.L711 + @.L713 + %ptr =l add %ptr, 8 + @.L717 + %v =l loadl %ptr + jnz %v, @.L718, @.L719 + @.L718 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 24 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -72 + %ptr =l add %ptr, -40 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 88 + jmp @.L717 + @.L719 + %ptr =l add %ptr, -16 + jmp @.L708 + @.L710 + %ptr =l add %ptr, 8 + @.L720 + %v =l loadl %ptr + jnz %v, @.L721, @.L722 + @.L721 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 32 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -24 + @.L723 + %v =l loadl %ptr + jnz %v, @.L724, @.L725 + @.L724 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 24 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -112 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 88 + jmp @.L723 + @.L725 + %ptr =l add %ptr, -8 + jmp @.L720 + @.L722 + %ptr =l add %ptr, 8 + @.L726 + %v =l loadl %ptr + jnz %v, @.L727, @.L728 + @.L727 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 24 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -24 + jmp @.L726 + @.L728 + %ptr =l add %ptr, -16 + %ptr =l add %ptr, -80 + jmp @.L705 + @.L707 + %ptr =l add %ptr, 32 + @.L729 + %v =l loadl %ptr + jnz %v, @.L730, @.L731 + @.L730 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L729 + @.L731 + %ptr =l add %ptr, -32 + jmp @.L693 + @.L695 + %ptr =l add %ptr, 24 + @.L732 + %v =l loadl %ptr + jnz %v, @.L733, @.L734 + @.L733 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, 
%ptr + %ptr =l add %ptr, -24 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 24 + jmp @.L732 + @.L734 + %ptr =l add %ptr, -24 + @.L735 + %v =l loadl %ptr + jnz %v, @.L736, @.L737 + @.L736 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 24 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 48 + @.L738 + %v =l loadl %ptr + jnz %v, @.L739, @.L740 + @.L739 + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 8 + @.L741 + %v =l loadl %ptr + jnz %v, @.L742, @.L743 + @.L742 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -8 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 8 + jmp @.L741 + @.L743 + %ptr =l add %ptr, -8 + @.L744 + %v =l loadl %ptr + jnz %v, @.L745, @.L746 + @.L745 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -8 + jmp @.L744 + @.L746 + %ptr =l add %ptr, 64 + jmp @.L738 + @.L740 + %ptr =l add %ptr, -24 + %ptr =l add %ptr, -40 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -8 + @.L747 + %v =l loadl %ptr + jnz %v, @.L748, @.L749 + @.L748 + %ptr =l add %ptr, 8 + @.L750 + %v =l loadl %ptr + jnz %v, @.L751, @.L752 + @.L751 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 40 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -24 + @.L753 + %v =l loadl %ptr + jnz %v, @.L754, @.L755 + @.L754 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 24 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -112 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 80 + @.L756 + %v =l loadl %ptr + jnz %v, @.L757, @.L758 + @.L757 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 32 + %v =l loadl %ptr + %v =l add %v, 1 + storel 
%v, %ptr + %ptr =l add %ptr, -32 + jmp @.L756 + @.L758 + %ptr =l add %ptr, 8 + jmp @.L753 + @.L755 + %ptr =l add %ptr, -8 + @.L759 + %v =l loadl %ptr + jnz %v, @.L760, @.L761 + @.L760 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 32 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -56 + %ptr =l add %ptr, -56 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 80 + jmp @.L759 + @.L761 + %ptr =l add %ptr, -8 + jmp @.L750 + @.L752 + %ptr =l add %ptr, 16 + @.L762 + %v =l loadl %ptr + jnz %v, @.L763, @.L764 + @.L763 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 24 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -32 + @.L765 + %v =l loadl %ptr + jnz %v, @.L766, @.L767 + @.L766 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 32 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -112 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 80 + jmp @.L765 + @.L767 + %ptr =l add %ptr, 8 + jmp @.L762 + @.L764 + %ptr =l add %ptr, -8 + @.L768 + %v =l loadl %ptr + jnz %v, @.L769, @.L770 + @.L769 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 32 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -32 + jmp @.L768 + @.L770 + %ptr =l add %ptr, -88 + jmp @.L747 + @.L749 + %ptr =l add %ptr, 48 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -48 + jmp @.L735 + @.L737 + jmp @.L630 + @.L632 + %ptr =l add %ptr, 32 + @.L771 + %v =l loadl %ptr + jnz %v, @.L772, @.L773 + @.L772 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -32 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 32 + jmp @.L771 + @.L773 + %ptr =l add %ptr, -32 + @.L774 + %v =l loadl %ptr + jnz %v, @.L775, @.L776 + @.L775 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add 
%ptr, 32 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 40 + @.L777 + %v =l loadl %ptr + jnz %v, @.L778, @.L779 + @.L778 + %ptr =l add %ptr, 72 + jmp @.L777 + @.L779 + %ptr =l add %ptr, -72 + @.L780 + %v =l loadl %ptr + jnz %v, @.L781, @.L782 + @.L781 + %ptr =l add %ptr, 8 + @.L783 + %v =l loadl %ptr + jnz %v, @.L784, @.L785 + @.L784 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 40 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -32 + @.L786 + %v =l loadl %ptr + jnz %v, @.L787, @.L788 + @.L787 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 32 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -112 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 88 + @.L789 + %v =l loadl %ptr + jnz %v, @.L790, @.L791 + @.L790 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 24 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -24 + jmp @.L789 + @.L791 + %ptr =l add %ptr, -8 + jmp @.L786 + @.L788 + %ptr =l add %ptr, 8 + @.L792 + %v =l loadl %ptr + jnz %v, @.L793, @.L794 + @.L793 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 24 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -112 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 88 + jmp @.L792 + @.L794 + %ptr =l add %ptr, -16 + jmp @.L783 + @.L785 + %ptr =l add %ptr, 8 + @.L795 + %v =l loadl %ptr + jnz %v, @.L796, @.L797 + @.L796 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 32 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -24 + @.L798 + %v =l loadl %ptr + jnz %v, @.L799, @.L800 + @.L799 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 24 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -112 + %v =l loadl %ptr + %v =l add %v, 1 
+ storel %v, %ptr + %ptr =l add %ptr, 88 + jmp @.L798 + @.L800 + %ptr =l add %ptr, -8 + jmp @.L795 + @.L797 + %ptr =l add %ptr, 8 + @.L801 + %v =l loadl %ptr + jnz %v, @.L802, @.L803 + @.L802 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 24 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -24 + jmp @.L801 + @.L803 + %ptr =l add %ptr, -56 + %ptr =l add %ptr, -40 + jmp @.L780 + @.L782 + jmp @.L774 + @.L776 + %ptr =l add %ptr, 8 + @.L804 + %v =l loadl %ptr + jnz %v, @.L805, @.L806 + @.L805 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L804 + @.L806 + %ptr =l add %ptr, 16 + @.L807 + %v =l loadl %ptr + jnz %v, @.L808, @.L809 + @.L808 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L807 + @.L809 + %ptr =l add %ptr, 8 + @.L810 + %v =l loadl %ptr + jnz %v, @.L811, @.L812 + @.L811 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L810 + @.L812 + %ptr =l add %ptr, 40 + @.L813 + %v =l loadl %ptr + jnz %v, @.L814, @.L815 + @.L814 + %ptr =l add %ptr, 16 + @.L816 + %v =l loadl %ptr + jnz %v, @.L817, @.L818 + @.L817 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L816 + @.L818 + %ptr =l add %ptr, 8 + @.L819 + %v =l loadl %ptr + jnz %v, @.L820, @.L821 + @.L820 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L819 + @.L821 + %ptr =l add %ptr, 48 + jmp @.L813 + @.L815 + %ptr =l add %ptr, -72 + @.L822 + %v =l loadl %ptr + jnz %v, @.L823, @.L824 + @.L823 + %ptr =l add %ptr, -72 + jmp @.L822 + @.L824 + %ptr =l add %ptr, 72 + @.L825 + %v =l loadl %ptr + jnz %v, @.L826, @.L827 + @.L826 + %ptr =l add %ptr, 40 + @.L828 + %v =l loadl %ptr + jnz %v, @.L829, @.L830 + @.L829 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -8 + %ptr =l add %ptr, -24 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 32 + jmp @.L828 + @.L830 + %ptr =l add %ptr, -32 + @.L831 + %v =l loadl %ptr + jnz %v, @.L832, @.L833 + @.L832 + 
%v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 32 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -24 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -8 + jmp @.L831 + @.L833 + %ptr =l add %ptr, 64 + jmp @.L825 + @.L827 + %ptr =l add %ptr, -72 + @.L834 + %v =l loadl %ptr + jnz %v, @.L835, @.L836 + @.L835 + %ptr =l add %ptr, -72 + jmp @.L834 + @.L836 + %ptr =l add %ptr, 72 + %v =l loadl %ptr + %v =l add %v, 15 + storel %v, %ptr + @.L837 + %v =l loadl %ptr + jnz %v, @.L838, @.L839 + @.L838 + @.L840 + %v =l loadl %ptr + jnz %v, @.L841, @.L842 + @.L841 + %ptr =l add %ptr, 72 + jmp @.L840 + @.L842 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 8 + @.L843 + %v =l loadl %ptr + jnz %v, @.L844, @.L845 + @.L844 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L843 + @.L845 + %ptr =l add %ptr, 8 + @.L846 + %v =l loadl %ptr + jnz %v, @.L847, @.L848 + @.L847 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L846 + @.L848 + %ptr =l add %ptr, 8 + @.L849 + %v =l loadl %ptr + jnz %v, @.L850, @.L851 + @.L850 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L849 + @.L851 + %ptr =l add %ptr, 8 + @.L852 + %v =l loadl %ptr + jnz %v, @.L853, @.L854 + @.L853 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L852 + @.L854 + %ptr =l add %ptr, 8 + @.L855 + %v =l loadl %ptr + jnz %v, @.L856, @.L857 + @.L856 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L855 + @.L857 + %ptr =l add %ptr, 8 + @.L858 + %v =l loadl %ptr + jnz %v, @.L859, @.L860 + @.L859 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L858 + @.L860 + %ptr =l add %ptr, 8 + @.L861 + %v =l loadl %ptr + jnz %v, @.L862, @.L863 + @.L862 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L861 + @.L863 + %ptr =l add %ptr, 8 + @.L864 + %v =l loadl %ptr + jnz %v, @.L865, @.L866 + @.L865 + %v =l loadl %ptr + %v =l add %v, -1 + 
storel %v, %ptr + jmp @.L864 + @.L866 + %ptr =l add %ptr, 8 + @.L867 + %v =l loadl %ptr + jnz %v, @.L868, @.L869 + @.L868 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L867 + @.L869 + %ptr =l add %ptr, -72 + @.L870 + %v =l loadl %ptr + jnz %v, @.L871, @.L872 + @.L871 + %ptr =l add %ptr, -72 + jmp @.L870 + @.L872 + %ptr =l add %ptr, 72 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L837 + @.L839 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + @.L873 + %v =l loadl %ptr + jnz %v, @.L874, @.L875 + @.L874 + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 64 + jmp @.L873 + @.L875 + %ptr =l add %ptr, -72 + @.L876 + %v =l loadl %ptr + jnz %v, @.L877, @.L878 + @.L877 + %ptr =l add %ptr, -72 + jmp @.L876 + @.L878 + %ptr =l add %ptr, 72 + @.L879 + %v =l loadl %ptr + jnz %v, @.L880, @.L881 + @.L880 + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 32 + @.L882 + %v =l loadl %ptr + jnz %v, @.L883, @.L884 + @.L883 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -32 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 32 + jmp @.L882 + @.L884 + %ptr =l add %ptr, -32 + @.L885 + %v =l loadl %ptr + jnz %v, @.L886, @.L887 + @.L886 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 32 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -40 + @.L888 + %v =l loadl %ptr + jnz %v, @.L889, @.L890 + @.L889 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 16 + @.L891 + %v =l loadl %ptr + jnz %v, @.L892, @.L893 + @.L892 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -16 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 16 + jmp @.L891 + @.L893 + %ptr =l add %ptr, -16 + @.L894 + %v =l loadl %ptr + jnz %v, @.L895, @.L896 + @.L895 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, 
%ptr + %ptr =l add %ptr, 16 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -24 + jmp @.L894 + @.L896 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 72 + jmp @.L888 + @.L890 + %ptr =l add %ptr, -64 + @.L897 + %v =l loadl %ptr + jnz %v, @.L898, @.L899 + @.L898 + %ptr =l add %ptr, -72 + jmp @.L897 + @.L899 + jmp @.L885 + @.L887 + %ptr =l add %ptr, 72 + @.L900 + %v =l loadl %ptr + jnz %v, @.L901, @.L902 + @.L901 + %ptr =l add %ptr, 72 + jmp @.L900 + @.L902 + %ptr =l add %ptr, -64 + %ptr =l add %ptr, -8 + @.L903 + %v =l loadl %ptr + jnz %v, @.L904, @.L905 + @.L904 + %ptr =l add %ptr, 8 + @.L906 + %v =l loadl %ptr + jnz %v, @.L907, @.L908 + @.L907 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 72 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -72 + jmp @.L906 + @.L908 + %ptr =l add %ptr, -80 + jmp @.L903 + @.L905 + %ptr =l add %ptr, 8 + @.L909 + %v =l loadl %ptr + jnz %v, @.L910, @.L911 + @.L910 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 72 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -72 + jmp @.L909 + @.L911 + %ptr =l add %ptr, -8 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 64 + jmp @.L879 + @.L881 + %ptr =l add %ptr, -72 + @.L912 + %v =l loadl %ptr + jnz %v, @.L913, @.L914 + @.L913 + %ptr =l add %ptr, 8 + @.L915 + %v =l loadl %ptr + jnz %v, @.L916, @.L917 + @.L916 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L915 + @.L917 + %ptr =l add %ptr, -8 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 24 + @.L918 + %v =l loadl %ptr + jnz %v, @.L919, @.L920 + @.L919 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -24 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 8 + @.L921 + %v =l loadl %ptr + jnz 
%v, @.L922, @.L923 + @.L922 + %ptr =l add %ptr, -8 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -56 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 56 + jmp @.L921 + @.L923 + %ptr =l add %ptr, -8 + @.L924 + %v =l loadl %ptr + jnz %v, @.L925, @.L926 + @.L925 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -8 + jmp @.L924 + @.L926 + %ptr =l add %ptr, 24 + jmp @.L918 + @.L920 + %ptr =l add %ptr, -16 + @.L927 + %v =l loadl %ptr + jnz %v, @.L928, @.L929 + @.L928 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 16 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -16 + jmp @.L927 + @.L929 + %ptr =l add %ptr, -8 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -72 + jmp @.L912 + @.L914 + %ptr =l add %ptr, 72 + @.L930 + %v =l loadl %ptr + jnz %v, @.L931, @.L932 + @.L931 + %ptr =l add %ptr, 24 + @.L933 + %v =l loadl %ptr + jnz %v, @.L934, @.L935 + @.L934 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -288 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 288 + jmp @.L933 + @.L935 + %ptr =l add %ptr, 8 + %ptr =l add %ptr, 40 + jmp @.L930 + @.L932 + %ptr =l add %ptr, -72 + @.L936 + %v =l loadl %ptr + jnz %v, @.L937, @.L938 + @.L937 + %ptr =l add %ptr, -72 + jmp @.L936 + @.L938 + %ptr =l add %ptr, 40 + @.L939 + %v =l loadl %ptr + jnz %v, @.L940, @.L941 + @.L940 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L939 + @.L941 + %ptr =l add %ptr, 32 + %v =l loadl %ptr + %v =l add %v, 15 + storel %v, %ptr + @.L942 + %v =l loadl %ptr + jnz %v, @.L943, @.L944 + @.L943 + @.L945 + %v =l loadl %ptr + jnz %v, @.L946, @.L947 + @.L946 + %ptr =l add %ptr, 72 + jmp @.L945 + @.L947 + %ptr =l add %ptr, -72 + %v 
=l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -40 + %ptr =l add %ptr, -32 + @.L948 + %v =l loadl %ptr + jnz %v, @.L949, @.L950 + @.L949 + %ptr =l add %ptr, -72 + jmp @.L948 + @.L950 + %ptr =l add %ptr, 72 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L942 + @.L944 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + @.L951 + %v =l loadl %ptr + jnz %v, @.L952, @.L953 + @.L952 + %ptr =l add %ptr, 24 + @.L954 + %v =l loadl %ptr + jnz %v, @.L955, @.L956 + @.L955 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -24 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 24 + jmp @.L954 + @.L956 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -24 + @.L957 + %v =l loadl %ptr + jnz %v, @.L958, @.L959 + @.L958 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 24 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 8 + @.L960 + %v =l loadl %ptr + jnz %v, @.L961, @.L962 + @.L961 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -32 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 32 + jmp @.L960 + @.L962 + %ptr =l add %ptr, -32 + @.L963 + %v =l loadl %ptr + jnz %v, @.L964, @.L965 + @.L964 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 32 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -40 + %ptr =l add %ptr, -64 + @.L966 + %v =l loadl %ptr + jnz %v, @.L967, @.L968 + @.L967 + %ptr =l add %ptr, -72 + jmp @.L966 + @.L968 + %ptr =l add %ptr, 32 + @.L969 + %v =l loadl %ptr + jnz %v, @.L970, @.L971 + @.L970 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L969 + @.L971 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 40 + @.L972 + %v =l loadl %ptr + jnz %v, @.L973, @.L974 + @.L973 + %ptr =l add %ptr, 72 + jmp @.L972 + @.L974 + %ptr =l add %ptr, 8 + %v =l loadl %ptr + 
%v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -8 + jmp @.L963 + @.L965 + jmp @.L957 + @.L959 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 32 + @.L975 + %v =l loadl %ptr + jnz %v, @.L976, @.L977 + @.L976 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -32 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 32 + jmp @.L975 + @.L977 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -32 + @.L978 + %v =l loadl %ptr + jnz %v, @.L979, @.L980 + @.L979 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 32 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -8 + @.L981 + %v =l loadl %ptr + jnz %v, @.L982, @.L983 + @.L982 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -24 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 24 + jmp @.L981 + @.L983 + %ptr =l add %ptr, -24 + @.L984 + %v =l loadl %ptr + jnz %v, @.L985, @.L986 + @.L985 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 24 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -96 + @.L987 + %v =l loadl %ptr + jnz %v, @.L988, @.L989 + @.L988 + %ptr =l add %ptr, -72 + jmp @.L987 + @.L989 + %ptr =l add %ptr, 24 + @.L990 + %v =l loadl %ptr + jnz %v, @.L991, @.L992 + @.L991 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L990 + @.L992 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 48 + @.L993 + %v =l loadl %ptr + jnz %v, @.L994, @.L995 + @.L994 + %ptr =l add %ptr, 72 + jmp @.L993 + @.L995 + %ptr =l add %ptr, 8 + @.L996 + %v =l loadl %ptr + jnz %v, @.L997, @.L998 + @.L997 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L996 + @.L998 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -8 + jmp @.L984 + @.L986 + jmp @.L978 + @.L980 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, 
%ptr + %ptr =l add %ptr, 8 + @.L999 + %v =l loadl %ptr + jnz %v, @.L1000, @.L1001 + @.L1000 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -8 + @.L1002 + %v =l loadl %ptr + jnz %v, @.L1003, @.L1004 + @.L1003 + %ptr =l add %ptr, 16 + %ptr =l add %ptr, 56 + jmp @.L1002 + @.L1004 + %ptr =l add %ptr, -64 + jmp @.L999 + @.L1001 + %ptr =l add %ptr, 64 + jmp @.L951 + @.L953 + %ptr =l add %ptr, -72 + @.L1005 + %v =l loadl %ptr + jnz %v, @.L1006, @.L1007 + @.L1006 + %ptr =l add %ptr, -72 + jmp @.L1005 + @.L1007 + %ptr =l add %ptr, 24 + @.L1008 + %v =l loadl %ptr + jnz %v, @.L1009, @.L1010 + @.L1009 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -24 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 24 + jmp @.L1008 + @.L1010 + %ptr =l add %ptr, -24 + @.L1011 + %v =l loadl %ptr + jnz %v, @.L1012, @.L1013 + @.L1012 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 24 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 48 + @.L1014 + %v =l loadl %ptr + jnz %v, @.L1015, @.L1016 + @.L1015 + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 24 + @.L1017 + %v =l loadl %ptr + jnz %v, @.L1018, @.L1019 + @.L1018 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -24 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 24 + jmp @.L1017 + @.L1019 + %ptr =l add %ptr, -24 + @.L1020 + %v =l loadl %ptr + jnz %v, @.L1021, @.L1022 + @.L1021 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 24 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -24 + jmp @.L1020 + @.L1022 + %ptr =l add %ptr, 64 + jmp @.L1014 + @.L1016 + %ptr =l add %ptr, -64 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -8 + @.L1023 + %v =l loadl %ptr + jnz %v, @.L1024, @.L1025 + @.L1024 + %ptr =l add %ptr, 8 + @.L1026 + %v =l 
loadl %ptr + jnz %v, @.L1027, @.L1028 + @.L1027 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 8 + @.L1029 + %v =l loadl %ptr + jnz %v, @.L1030, @.L1031 + @.L1030 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -8 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -80 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 96 + @.L1032 + %v =l loadl %ptr + jnz %v, @.L1033, @.L1034 + @.L1033 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -16 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 16 + jmp @.L1032 + @.L1034 + %ptr =l add %ptr, -8 + jmp @.L1029 + @.L1031 + %ptr =l add %ptr, 8 + @.L1035 + %v =l loadl %ptr + jnz %v, @.L1036, @.L1037 + @.L1036 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -16 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -80 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 96 + jmp @.L1035 + @.L1037 + %ptr =l add %ptr, -24 + jmp @.L1026 + @.L1028 + %ptr =l add %ptr, 16 + @.L1038 + %v =l loadl %ptr + jnz %v, @.L1039, @.L1040 + @.L1039 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -8 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 16 + @.L1041 + %v =l loadl %ptr + jnz %v, @.L1042, @.L1043 + @.L1042 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -16 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -80 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 96 + jmp @.L1041 + @.L1043 + %ptr =l add %ptr, -8 + jmp @.L1038 + @.L1040 + %ptr =l add %ptr, 8 + @.L1044 + %v =l loadl %ptr + jnz %v, @.L1045, @.L1046 + @.L1045 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -16 + %v =l 
loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 16 + jmp @.L1044 + @.L1046 + %ptr =l add %ptr, -104 + jmp @.L1023 + @.L1025 + jmp @.L1011 + @.L1013 + %ptr =l add %ptr, 32 + @.L1047 + %v =l loadl %ptr + jnz %v, @.L1048, @.L1049 + @.L1048 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -32 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 32 + jmp @.L1047 + @.L1049 + %ptr =l add %ptr, -32 + @.L1050 + %v =l loadl %ptr + jnz %v, @.L1051, @.L1052 + @.L1051 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 32 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 40 + @.L1053 + %v =l loadl %ptr + jnz %v, @.L1054, @.L1055 + @.L1054 + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 16 + @.L1056 + %v =l loadl %ptr + jnz %v, @.L1057, @.L1058 + @.L1057 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -16 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 16 + jmp @.L1056 + @.L1058 + %ptr =l add %ptr, -16 + @.L1059 + %v =l loadl %ptr + jnz %v, @.L1060, @.L1061 + @.L1060 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 16 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -16 + jmp @.L1059 + @.L1061 + %ptr =l add %ptr, 16 + %ptr =l add %ptr, 48 + jmp @.L1053 + @.L1055 + %ptr =l add %ptr, -64 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -8 + @.L1062 + %v =l loadl %ptr + jnz %v, @.L1063, @.L1064 + @.L1063 + %ptr =l add %ptr, 8 + @.L1065 + %v =l loadl %ptr + jnz %v, @.L1066, @.L1067 + @.L1066 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 16 + @.L1068 + %v =l loadl %ptr + jnz %v, @.L1069, @.L1070 + @.L1069 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -16 
+ %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -80 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 88 + @.L1071 + %v =l loadl %ptr + jnz %v, @.L1072, @.L1073 + @.L1072 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -8 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 8 + jmp @.L1071 + @.L1073 + %ptr =l add %ptr, 8 + jmp @.L1068 + @.L1070 + %ptr =l add %ptr, -8 + @.L1074 + %v =l loadl %ptr + jnz %v, @.L1075, @.L1076 + @.L1075 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -8 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -80 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 32 + %ptr =l add %ptr, 56 + jmp @.L1074 + @.L1076 + %ptr =l add %ptr, -16 + jmp @.L1065 + @.L1067 + %ptr =l add %ptr, 24 + @.L1077 + %v =l loadl %ptr + jnz %v, @.L1078, @.L1079 + @.L1078 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -16 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 8 + @.L1080 + %v =l loadl %ptr + jnz %v, @.L1081, @.L1082 + @.L1081 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -8 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -80 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 88 + jmp @.L1080 + @.L1082 + %ptr =l add %ptr, 8 + jmp @.L1077 + @.L1079 + %ptr =l add %ptr, -8 + @.L1083 + %v =l loadl %ptr + jnz %v, @.L1084, @.L1085 + @.L1084 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -8 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 8 + jmp @.L1083 + @.L1085 + %ptr =l add %ptr, -96 + jmp @.L1062 + @.L1064 + %ptr =l add %ptr, 40 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -40 + jmp @.L1050 + @.L1052 + %ptr =l add %ptr, 72 + @.L1086 + %v =l loadl %ptr + jnz %v, 
@.L1087, @.L1088 + @.L1087 + %ptr =l add %ptr, 24 + @.L1089 + %v =l loadl %ptr + jnz %v, @.L1090, @.L1091 + @.L1090 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L1089 + @.L1091 + %ptr =l add %ptr, 8 + @.L1092 + %v =l loadl %ptr + jnz %v, @.L1093, @.L1094 + @.L1093 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L1092 + @.L1094 + %ptr =l add %ptr, 8 + @.L1095 + %v =l loadl %ptr + jnz %v, @.L1096, @.L1097 + @.L1096 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L1095 + @.L1097 + %ptr =l add %ptr, 32 + jmp @.L1086 + @.L1088 + %ptr =l add %ptr, -72 + @.L1098 + %v =l loadl %ptr + jnz %v, @.L1099, @.L1100 + @.L1099 + %ptr =l add %ptr, -72 + jmp @.L1098 + @.L1100 + %ptr =l add %ptr, 24 + @.L1101 + %v =l loadl %ptr + jnz %v, @.L1102, @.L1103 + @.L1102 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L1101 + @.L1103 + %ptr =l add %ptr, 8 + @.L1104 + %v =l loadl %ptr + jnz %v, @.L1105, @.L1106 + @.L1105 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L1104 + @.L1106 + %ptr =l add %ptr, 40 + @.L1107 + %v =l loadl %ptr + jnz %v, @.L1108, @.L1109 + @.L1108 + %ptr =l add %ptr, 56 + @.L1110 + %v =l loadl %ptr + jnz %v, @.L1111, @.L1112 + @.L1111 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -40 + %ptr =l add %ptr, -8 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 48 + jmp @.L1110 + @.L1112 + %ptr =l add %ptr, -48 + @.L1113 + %v =l loadl %ptr + jnz %v, @.L1114, @.L1115 + @.L1114 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 48 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -32 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -16 + jmp @.L1113 + @.L1115 + %ptr =l add %ptr, 64 + jmp @.L1107 + @.L1109 + %ptr =l add %ptr, -72 + @.L1116 + %v =l loadl %ptr + jnz %v, @.L1117, @.L1118 + @.L1117 + %ptr =l add %ptr, -72 + jmp @.L1116 + @.L1118 + %ptr =l add %ptr, 32 + 
%v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 8 + @.L1119 + %v =l loadl %ptr + jnz %v, @.L1120, @.L1121 + @.L1120 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -8 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -32 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 32 + %ptr =l add %ptr, 8 + jmp @.L1119 + @.L1121 + %ptr =l add %ptr, 16 + @.L1122 + %v =l loadl %ptr + jnz %v, @.L1123, @.L1124 + @.L1123 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -56 + @.L1125 + %v =l loadl %ptr + jnz %v, @.L1126, @.L1127 + @.L1126 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 40 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -8 + %v =l loadl %ptr + %v =l add %v, 2 + storel %v, %ptr + %ptr =l add %ptr, -32 + jmp @.L1125 + @.L1127 + %ptr =l add %ptr, 40 + @.L1128 + %v =l loadl %ptr + jnz %v, @.L1129, @.L1130 + @.L1129 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -40 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 40 + jmp @.L1128 + @.L1130 + %ptr =l add %ptr, -8 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 16 + jmp @.L1122 + @.L1124 + %ptr =l add %ptr, -16 + @.L1131 + %v =l loadl %ptr + jnz %v, @.L1132, @.L1133 + @.L1132 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 16 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -16 + jmp @.L1131 + @.L1133 + %ptr =l add %ptr, -40 + @.L1134 + %v =l loadl %ptr + jnz %v, @.L1135, @.L1136 + @.L1135 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 40 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -16 + %ptr =l add %ptr, -24 + jmp @.L1134 + @.L1136 + %v =l loadl %ptr + %v =l add %v, 1 + 
storel %v, %ptr + %ptr =l add %ptr, 32 + @.L1137 + %v =l loadl %ptr + jnz %v, @.L1138, @.L1139 + @.L1138 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -32 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 32 + jmp @.L1137 + @.L1139 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -32 + @.L1140 + %v =l loadl %ptr + jnz %v, @.L1141, @.L1142 + @.L1141 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 32 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 40 + @.L1143 + %v =l loadl %ptr + jnz %v, @.L1144, @.L1145 + @.L1144 + %ptr =l add %ptr, 24 + @.L1146 + %v =l loadl %ptr + jnz %v, @.L1147, @.L1148 + @.L1147 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -24 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 24 + jmp @.L1146 + @.L1148 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -24 + @.L1149 + %v =l loadl %ptr + jnz %v, @.L1150, @.L1151 + @.L1150 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 24 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -8 + @.L1152 + %v =l loadl %ptr + jnz %v, @.L1153, @.L1154 + @.L1153 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -16 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 16 + jmp @.L1152 + @.L1154 + %ptr =l add %ptr, -16 + @.L1155 + %v =l loadl %ptr + jnz %v, @.L1156, @.L1157 + @.L1156 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 16 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -16 + %ptr =l add %ptr, -72 + @.L1158 + %v =l loadl %ptr + jnz %v, @.L1159, @.L1160 + @.L1159 + %ptr =l add %ptr, -72 + jmp @.L1158 + @.L1160 + %ptr =l add %ptr, 32 + @.L1161 + %v =l loadl %ptr + jnz %v, @.L1162, @.L1163 + @.L1162 + %v =l loadl %ptr + %v =l add %v, -1 + 
storel %v, %ptr + jmp @.L1161 + @.L1163 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 40 + @.L1164 + %v =l loadl %ptr + jnz %v, @.L1165, @.L1166 + @.L1165 + %ptr =l add %ptr, 72 + jmp @.L1164 + @.L1166 + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -8 + jmp @.L1155 + @.L1157 + jmp @.L1149 + @.L1151 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 16 + @.L1167 + %v =l loadl %ptr + jnz %v, @.L1168, @.L1169 + @.L1168 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -16 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 16 + jmp @.L1167 + @.L1169 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -16 + @.L1170 + %v =l loadl %ptr + jnz %v, @.L1171, @.L1172 + @.L1171 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 16 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 8 + @.L1173 + %v =l loadl %ptr + jnz %v, @.L1174, @.L1175 + @.L1174 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -24 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 24 + jmp @.L1173 + @.L1175 + %ptr =l add %ptr, -8 + %ptr =l add %ptr, -16 + @.L1176 + %v =l loadl %ptr + jnz %v, @.L1177, @.L1178 + @.L1177 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 24 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -96 + @.L1179 + %v =l loadl %ptr + jnz %v, @.L1180, @.L1181 + @.L1180 + %ptr =l add %ptr, -72 + jmp @.L1179 + @.L1181 + %ptr =l add %ptr, 24 + @.L1182 + %v =l loadl %ptr + jnz %v, @.L1183, @.L1184 + @.L1183 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L1182 + @.L1184 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 48 + @.L1185 + %v =l loadl %ptr + jnz %v, @.L1186, @.L1187 + @.L1186 + %ptr =l add %ptr, 72 + jmp @.L1185 + 
@.L1187 + %ptr =l add %ptr, 8 + @.L1188 + %v =l loadl %ptr + jnz %v, @.L1189, @.L1190 + @.L1189 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L1188 + @.L1190 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -8 + jmp @.L1176 + @.L1178 + jmp @.L1170 + @.L1172 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 8 + @.L1191 + %v =l loadl %ptr + jnz %v, @.L1192, @.L1193 + @.L1192 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -8 + @.L1194 + %v =l loadl %ptr + jnz %v, @.L1195, @.L1196 + @.L1195 + %ptr =l add %ptr, 72 + jmp @.L1194 + @.L1196 + %ptr =l add %ptr, -8 + %ptr =l add %ptr, -56 + jmp @.L1191 + @.L1193 + %ptr =l add %ptr, 64 + jmp @.L1143 + @.L1145 + %ptr =l add %ptr, -72 + @.L1197 + %v =l loadl %ptr + jnz %v, @.L1198, @.L1199 + @.L1198 + %ptr =l add %ptr, -72 + jmp @.L1197 + @.L1199 + %ptr =l add %ptr, 24 + @.L1200 + %v =l loadl %ptr + jnz %v, @.L1201, @.L1202 + @.L1201 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -24 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 24 + jmp @.L1200 + @.L1202 + %ptr =l add %ptr, -24 + @.L1203 + %v =l loadl %ptr + jnz %v, @.L1204, @.L1205 + @.L1204 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 24 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 48 + @.L1206 + %v =l loadl %ptr + jnz %v, @.L1207, @.L1208 + @.L1207 + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 8 + @.L1209 + %v =l loadl %ptr + jnz %v, @.L1210, @.L1211 + @.L1210 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -8 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 8 + jmp @.L1209 + @.L1211 + %ptr =l add %ptr, -8 + @.L1212 + %v =l loadl %ptr + jnz %v, @.L1213, @.L1214 + @.L1213 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 8 + %v =l 
loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -8 + jmp @.L1212 + @.L1214 + %ptr =l add %ptr, 64 + jmp @.L1206 + @.L1208 + %ptr =l add %ptr, -64 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -8 + @.L1215 + %v =l loadl %ptr + jnz %v, @.L1216, @.L1217 + @.L1216 + %ptr =l add %ptr, 8 + @.L1218 + %v =l loadl %ptr + jnz %v, @.L1219, @.L1220 + @.L1219 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 32 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -16 + @.L1221 + %v =l loadl %ptr + jnz %v, @.L1222, @.L1223 + @.L1222 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 16 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -104 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 80 + @.L1224 + %v =l loadl %ptr + jnz %v, @.L1225, @.L1226 + @.L1225 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 24 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -24 + jmp @.L1224 + @.L1226 + %ptr =l add %ptr, 8 + jmp @.L1221 + @.L1223 + %ptr =l add %ptr, -8 + @.L1227 + %v =l loadl %ptr + jnz %v, @.L1228, @.L1229 + @.L1228 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 24 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -104 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 80 + jmp @.L1227 + @.L1229 + %ptr =l add %ptr, -8 + jmp @.L1218 + @.L1220 + %ptr =l add %ptr, 16 + @.L1230 + %v =l loadl %ptr + jnz %v, @.L1231, @.L1232 + @.L1231 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 16 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -24 + @.L1233 + %v =l loadl %ptr + jnz %v, @.L1234, @.L1235 + @.L1234 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 24 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr 
+ %ptr =l add %ptr, -104 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 80 + jmp @.L1233 + @.L1235 + %ptr =l add %ptr, 8 + jmp @.L1230 + @.L1232 + %ptr =l add %ptr, -8 + @.L1236 + %v =l loadl %ptr + jnz %v, @.L1237, @.L1238 + @.L1237 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 24 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -24 + jmp @.L1236 + @.L1238 + %ptr =l add %ptr, -88 + jmp @.L1215 + @.L1217 + %ptr =l add %ptr, 40 + @.L1239 + %v =l loadl %ptr + jnz %v, @.L1240, @.L1241 + @.L1240 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L1239 + @.L1241 + %ptr =l add %ptr, 16 + @.L1242 + %v =l loadl %ptr + jnz %v, @.L1243, @.L1244 + @.L1243 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -56 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 56 + jmp @.L1242 + @.L1244 + %ptr =l add %ptr, -56 + @.L1245 + %v =l loadl %ptr + jnz %v, @.L1246, @.L1247 + @.L1246 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 56 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -16 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -40 + jmp @.L1245 + @.L1247 + jmp @.L1203 + @.L1205 + %ptr =l add %ptr, 32 + @.L1248 + %v =l loadl %ptr + jnz %v, @.L1249, @.L1250 + @.L1249 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -32 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 8 + %ptr =l add %ptr, 24 + jmp @.L1248 + @.L1250 + %ptr =l add %ptr, -32 + @.L1251 + %v =l loadl %ptr + jnz %v, @.L1252, @.L1253 + @.L1252 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 32 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 40 + @.L1254 + %v =l loadl %ptr + jnz %v, @.L1255, @.L1256 + @.L1255 + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l 
add %ptr, 16 + @.L1257 + %v =l loadl %ptr + jnz %v, @.L1258, @.L1259 + @.L1258 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -16 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 16 + jmp @.L1257 + @.L1259 + %ptr =l add %ptr, -16 + @.L1260 + %v =l loadl %ptr + jnz %v, @.L1261, @.L1262 + @.L1261 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 16 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -16 + jmp @.L1260 + @.L1262 + %ptr =l add %ptr, 64 + jmp @.L1254 + @.L1256 + %ptr =l add %ptr, -64 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -8 + @.L1263 + %v =l loadl %ptr + jnz %v, @.L1264, @.L1265 + @.L1264 + %ptr =l add %ptr, 8 + @.L1266 + %v =l loadl %ptr + jnz %v, @.L1267, @.L1268 + @.L1267 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 32 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -24 + @.L1269 + %v =l loadl %ptr + jnz %v, @.L1270, @.L1271 + @.L1270 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 24 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -104 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 88 + @.L1272 + %v =l loadl %ptr + jnz %v, @.L1273, @.L1274 + @.L1273 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 16 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -16 + jmp @.L1272 + @.L1274 + %ptr =l add %ptr, -8 + jmp @.L1269 + @.L1271 + %ptr =l add %ptr, 8 + @.L1275 + %v =l loadl %ptr + jnz %v, @.L1276, @.L1277 + @.L1276 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 16 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -104 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 88 + jmp @.L1275 + @.L1277 + %ptr =l add %ptr, -16 + jmp @.L1266 + @.L1268 + %ptr 
=l add %ptr, 8 + @.L1278 + %v =l loadl %ptr + jnz %v, @.L1279, @.L1280 + @.L1279 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 24 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -16 + @.L1281 + %v =l loadl %ptr + jnz %v, @.L1282, @.L1283 + @.L1282 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 16 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -104 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 88 + jmp @.L1281 + @.L1283 + %ptr =l add %ptr, -8 + jmp @.L1278 + @.L1280 + %ptr =l add %ptr, 8 + @.L1284 + %v =l loadl %ptr + jnz %v, @.L1285, @.L1286 + @.L1285 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 16 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -16 + jmp @.L1284 + @.L1286 + %ptr =l add %ptr, -96 + jmp @.L1263 + @.L1265 + jmp @.L1251 + @.L1253 + %ptr =l add %ptr, 32 + @.L1287 + %v =l loadl %ptr + jnz %v, @.L1288, @.L1289 + @.L1288 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L1287 + @.L1289 + %ptr =l add %ptr, -32 + jmp @.L1140 + @.L1142 + %ptr =l add %ptr, 32 + @.L1290 + %v =l loadl %ptr + jnz %v, @.L1291, @.L1292 + @.L1291 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -32 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 16 + %ptr =l add %ptr, 16 + jmp @.L1290 + @.L1292 + %ptr =l add %ptr, -32 + @.L1293 + %v =l loadl %ptr + jnz %v, @.L1294, @.L1295 + @.L1294 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 32 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 8 + @.L1296 + %v =l loadl %ptr + jnz %v, @.L1297, @.L1298 + @.L1297 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L1296 + @.L1298 + %ptr =l add %ptr, 16 + @.L1299 + %v =l loadl %ptr + jnz %v, @.L1300, @.L1301 + @.L1300 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, 
%ptr + %ptr =l add %ptr, -56 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 56 + jmp @.L1299 + @.L1301 + %ptr =l add %ptr, -56 + @.L1302 + %v =l loadl %ptr + jnz %v, @.L1303, @.L1304 + @.L1303 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 56 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -16 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -40 + jmp @.L1302 + @.L1304 + %ptr =l add %ptr, 72 + @.L1305 + %v =l loadl %ptr + jnz %v, @.L1306, @.L1307 + @.L1306 + %ptr =l add %ptr, 48 + %ptr =l add %ptr, 24 + jmp @.L1305 + @.L1307 + %ptr =l add %ptr, -72 + @.L1308 + %v =l loadl %ptr + jnz %v, @.L1309, @.L1310 + @.L1309 + %ptr =l add %ptr, 8 + @.L1311 + %v =l loadl %ptr + jnz %v, @.L1312, @.L1313 + @.L1312 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 32 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -24 + @.L1314 + %v =l loadl %ptr + jnz %v, @.L1315, @.L1316 + @.L1315 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 24 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -104 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 88 + @.L1317 + %v =l loadl %ptr + jnz %v, @.L1318, @.L1319 + @.L1318 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 16 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -16 + jmp @.L1317 + @.L1319 + %ptr =l add %ptr, -8 + jmp @.L1314 + @.L1316 + %ptr =l add %ptr, 8 + @.L1320 + %v =l loadl %ptr + jnz %v, @.L1321, @.L1322 + @.L1321 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 16 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -64 + %ptr =l add %ptr, -40 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 88 + jmp @.L1320 + @.L1322 + %ptr =l add %ptr, -16 + jmp @.L1311 + @.L1313 + 
%ptr =l add %ptr, 8 + @.L1323 + %v =l loadl %ptr + jnz %v, @.L1324, @.L1325 + @.L1324 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 24 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -16 + @.L1326 + %v =l loadl %ptr + jnz %v, @.L1327, @.L1328 + @.L1327 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 16 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -104 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 88 + jmp @.L1326 + @.L1328 + %ptr =l add %ptr, -8 + jmp @.L1323 + @.L1325 + %ptr =l add %ptr, 8 + @.L1329 + %v =l loadl %ptr + jnz %v, @.L1330, @.L1331 + @.L1330 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 16 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -16 + jmp @.L1329 + @.L1331 + %ptr =l add %ptr, -64 + %ptr =l add %ptr, -32 + jmp @.L1308 + @.L1310 + jmp @.L1293 + @.L1295 + %ptr =l add %ptr, 72 + @.L1332 + %v =l loadl %ptr + jnz %v, @.L1333, @.L1334 + @.L1333 + %ptr =l add %ptr, 16 + @.L1335 + %v =l loadl %ptr + jnz %v, @.L1336, @.L1337 + @.L1336 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L1335 + @.L1337 + %ptr =l add %ptr, 8 + @.L1338 + %v =l loadl %ptr + jnz %v, @.L1339, @.L1340 + @.L1339 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L1338 + @.L1340 + %ptr =l add %ptr, 48 + jmp @.L1332 + @.L1334 + %ptr =l add %ptr, -72 + @.L1341 + %v =l loadl %ptr + jnz %v, @.L1342, @.L1343 + @.L1342 + %ptr =l add %ptr, -72 + jmp @.L1341 + @.L1343 + %ptr =l add %ptr, 24 + @.L1344 + %v =l loadl %ptr + jnz %v, @.L1345, @.L1346 + @.L1345 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L1344 + @.L1346 + %ptr =l add %ptr, 8 + @.L1347 + %v =l loadl %ptr + jnz %v, @.L1348, @.L1349 + @.L1348 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L1347 + @.L1349 + %ptr =l add %ptr, 40 + @.L1350 + %v =l loadl %ptr + jnz %v, 
@.L1351, @.L1352 + @.L1351 + %ptr =l add %ptr, 40 + @.L1353 + %v =l loadl %ptr + jnz %v, @.L1354, @.L1355 + @.L1354 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -32 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 32 + jmp @.L1353 + @.L1355 + %ptr =l add %ptr, -32 + @.L1356 + %v =l loadl %ptr + jnz %v, @.L1357, @.L1358 + @.L1357 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 32 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -24 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -8 + jmp @.L1356 + @.L1358 + %ptr =l add %ptr, 64 + jmp @.L1350 + @.L1352 + %ptr =l add %ptr, -72 + @.L1359 + %v =l loadl %ptr + jnz %v, @.L1360, @.L1361 + @.L1360 + %ptr =l add %ptr, -72 + jmp @.L1359 + @.L1361 + %ptr =l add %ptr, 72 + @.L1362 + %v =l loadl %ptr + jnz %v, @.L1363, @.L1364 + @.L1363 + %ptr =l add %ptr, 48 + @.L1365 + %v =l loadl %ptr + jnz %v, @.L1366, @.L1367 + @.L1366 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -40 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 40 + jmp @.L1365 + @.L1367 + %ptr =l add %ptr, -40 + @.L1368 + %v =l loadl %ptr + jnz %v, @.L1369, @.L1370 + @.L1369 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 40 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -24 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -16 + jmp @.L1368 + @.L1370 + %ptr =l add %ptr, 64 + jmp @.L1362 + @.L1364 + %ptr =l add %ptr, -72 + @.L1371 + %v =l loadl %ptr + jnz %v, @.L1372, @.L1373 + @.L1372 + %ptr =l add %ptr, -72 + jmp @.L1371 + @.L1373 + %ptr =l add %ptr, 72 + %v =l loadl %ptr + %v =l add %v, 15 + storel %v, %ptr + @.L1374 + %v =l loadl %ptr + jnz %v, @.L1375, @.L1376 + @.L1375 + @.L1377 + %v =l loadl %ptr + jnz %v, @.L1378, @.L1379 + @.L1378 + %ptr =l add %ptr, 32 + %ptr =l add %ptr, 40 + jmp @.L1377 
+ @.L1379 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 8 + @.L1380 + %v =l loadl %ptr + jnz %v, @.L1381, @.L1382 + @.L1381 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L1380 + @.L1382 + %ptr =l add %ptr, 8 + @.L1383 + %v =l loadl %ptr + jnz %v, @.L1384, @.L1385 + @.L1384 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L1383 + @.L1385 + %ptr =l add %ptr, 8 + @.L1386 + %v =l loadl %ptr + jnz %v, @.L1387, @.L1388 + @.L1387 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L1386 + @.L1388 + %ptr =l add %ptr, 8 + @.L1389 + %v =l loadl %ptr + jnz %v, @.L1390, @.L1391 + @.L1390 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L1389 + @.L1391 + %ptr =l add %ptr, 8 + @.L1392 + %v =l loadl %ptr + jnz %v, @.L1393, @.L1394 + @.L1393 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L1392 + @.L1394 + %ptr =l add %ptr, 8 + @.L1395 + %v =l loadl %ptr + jnz %v, @.L1396, @.L1397 + @.L1396 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L1395 + @.L1397 + %ptr =l add %ptr, 8 + @.L1398 + %v =l loadl %ptr + jnz %v, @.L1399, @.L1400 + @.L1399 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L1398 + @.L1400 + %ptr =l add %ptr, 8 + @.L1401 + %v =l loadl %ptr + jnz %v, @.L1402, @.L1403 + @.L1402 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L1401 + @.L1403 + %ptr =l add %ptr, 8 + @.L1404 + %v =l loadl %ptr + jnz %v, @.L1405, @.L1406 + @.L1405 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L1404 + @.L1406 + %ptr =l add %ptr, -72 + @.L1407 + %v =l loadl %ptr + jnz %v, @.L1408, @.L1409 + @.L1408 + %ptr =l add %ptr, -72 + jmp @.L1407 + @.L1409 + %ptr =l add %ptr, 72 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L1374 + @.L1376 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + @.L1410 + %v =l loadl %ptr + jnz %v, @.L1411, @.L1412 + @.L1411 + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, 1 + 
storel %v, %ptr + %ptr =l add %ptr, 16 + %ptr =l add %ptr, 48 + jmp @.L1410 + @.L1412 + %ptr =l add %ptr, -72 + @.L1413 + %v =l loadl %ptr + jnz %v, @.L1414, @.L1415 + @.L1414 + %ptr =l add %ptr, -72 + jmp @.L1413 + @.L1415 + %ptr =l add %ptr, 72 + @.L1416 + %v =l loadl %ptr + jnz %v, @.L1417, @.L1418 + @.L1417 + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 32 + @.L1419 + %v =l loadl %ptr + jnz %v, @.L1420, @.L1421 + @.L1420 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -32 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 32 + jmp @.L1419 + @.L1421 + %ptr =l add %ptr, -32 + @.L1422 + %v =l loadl %ptr + jnz %v, @.L1423, @.L1424 + @.L1423 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 32 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -40 + @.L1425 + %v =l loadl %ptr + jnz %v, @.L1426, @.L1427 + @.L1426 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 16 + @.L1428 + %v =l loadl %ptr + jnz %v, @.L1429, @.L1430 + @.L1429 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -16 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 16 + jmp @.L1428 + @.L1430 + %ptr =l add %ptr, -16 + @.L1431 + %v =l loadl %ptr + jnz %v, @.L1432, @.L1433 + @.L1432 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 16 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 16 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -32 + jmp @.L1431 + @.L1433 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 72 + jmp @.L1425 + @.L1427 + %ptr =l add %ptr, -64 + @.L1434 + %v =l loadl %ptr + jnz %v, @.L1435, @.L1436 + @.L1435 + %ptr =l add %ptr, -72 + jmp @.L1434 + @.L1436 + jmp @.L1422 + @.L1424 + %ptr =l add %ptr, 72 + @.L1437 + %v =l loadl %ptr + jnz %v, @.L1438, @.L1439 + 
@.L1438 + %ptr =l add %ptr, 72 + jmp @.L1437 + @.L1439 + %ptr =l add %ptr, -72 + @.L1440 + %v =l loadl %ptr + jnz %v, @.L1441, @.L1442 + @.L1441 + %ptr =l add %ptr, 8 + @.L1443 + %v =l loadl %ptr + jnz %v, @.L1444, @.L1445 + @.L1444 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 72 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -72 + jmp @.L1443 + @.L1445 + %ptr =l add %ptr, -80 + jmp @.L1440 + @.L1442 + %ptr =l add %ptr, 8 + @.L1446 + %v =l loadl %ptr + jnz %v, @.L1447, @.L1448 + @.L1447 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 72 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -72 + jmp @.L1446 + @.L1448 + %ptr =l add %ptr, -8 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 64 + jmp @.L1416 + @.L1418 + %ptr =l add %ptr, -72 + @.L1449 + %v =l loadl %ptr + jnz %v, @.L1450, @.L1451 + @.L1450 + %ptr =l add %ptr, 8 + @.L1452 + %v =l loadl %ptr + jnz %v, @.L1453, @.L1454 + @.L1453 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L1452 + @.L1454 + %ptr =l add %ptr, -8 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 32 + @.L1455 + %v =l loadl %ptr + jnz %v, @.L1456, @.L1457 + @.L1456 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -32 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 8 + @.L1458 + %v =l loadl %ptr + jnz %v, @.L1459, @.L1460 + @.L1459 + %ptr =l add %ptr, -8 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -48 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 48 + jmp @.L1458 + @.L1460 + %ptr =l add %ptr, -8 + @.L1461 + %v =l loadl %ptr + jnz %v, @.L1462, @.L1463 + @.L1462 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, 1 + 
storel %v, %ptr + %ptr =l add %ptr, -8 + jmp @.L1461 + @.L1463 + %ptr =l add %ptr, 32 + jmp @.L1455 + @.L1457 + %ptr =l add %ptr, -24 + @.L1464 + %v =l loadl %ptr + jnz %v, @.L1465, @.L1466 + @.L1465 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 24 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -24 + jmp @.L1464 + @.L1466 + %ptr =l add %ptr, -8 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -72 + jmp @.L1449 + @.L1451 + %ptr =l add %ptr, 72 + @.L1467 + %v =l loadl %ptr + jnz %v, @.L1468, @.L1469 + @.L1468 + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 64 + jmp @.L1467 + @.L1469 + %ptr =l add %ptr, -72 + @.L1470 + %v =l loadl %ptr + jnz %v, @.L1471, @.L1472 + @.L1471 + %ptr =l add %ptr, -72 + jmp @.L1470 + @.L1472 + %ptr =l add %ptr, 72 + @.L1473 + %v =l loadl %ptr + jnz %v, @.L1474, @.L1475 + @.L1474 + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 40 + @.L1476 + %v =l loadl %ptr + jnz %v, @.L1477, @.L1478 + @.L1477 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -40 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 40 + jmp @.L1476 + @.L1478 + %ptr =l add %ptr, -40 + @.L1479 + %v =l loadl %ptr + jnz %v, @.L1480, @.L1481 + @.L1480 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 40 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -32 + %ptr =l add %ptr, -16 + @.L1482 + %v =l loadl %ptr + jnz %v, @.L1483, @.L1484 + @.L1483 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 24 + @.L1485 + %v =l loadl %ptr + jnz %v, @.L1486, @.L1487 + @.L1486 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -24 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 24 + jmp @.L1485 + @.L1487 + %ptr =l add %ptr, -24 + @.L1488 + 
%v =l loadl %ptr + jnz %v, @.L1489, @.L1490 + @.L1489 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 24 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -32 + jmp @.L1488 + @.L1490 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 72 + jmp @.L1482 + @.L1484 + %ptr =l add %ptr, -64 + @.L1491 + %v =l loadl %ptr + jnz %v, @.L1492, @.L1493 + @.L1492 + %ptr =l add %ptr, -72 + jmp @.L1491 + @.L1493 + jmp @.L1479 + @.L1481 + %ptr =l add %ptr, 72 + @.L1494 + %v =l loadl %ptr + jnz %v, @.L1495, @.L1496 + @.L1495 + %ptr =l add %ptr, 48 + %ptr =l add %ptr, 24 + jmp @.L1494 + @.L1496 + %ptr =l add %ptr, -72 + @.L1497 + %v =l loadl %ptr + jnz %v, @.L1498, @.L1499 + @.L1498 + %ptr =l add %ptr, 16 + @.L1500 + %v =l loadl %ptr + jnz %v, @.L1501, @.L1502 + @.L1501 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 72 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -72 + jmp @.L1500 + @.L1502 + %ptr =l add %ptr, -88 + jmp @.L1497 + @.L1499 + %ptr =l add %ptr, 16 + @.L1503 + %v =l loadl %ptr + jnz %v, @.L1504, @.L1505 + @.L1504 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 72 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -72 + jmp @.L1503 + @.L1505 + %ptr =l add %ptr, -16 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 24 + %ptr =l add %ptr, 40 + jmp @.L1473 + @.L1475 + %ptr =l add %ptr, -72 + @.L1506 + %v =l loadl %ptr + jnz %v, @.L1507, @.L1508 + @.L1507 + %ptr =l add %ptr, 8 + @.L1509 + %v =l loadl %ptr + jnz %v, @.L1510, @.L1511 + @.L1510 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L1509 + @.L1511 + %ptr =l add %ptr, -8 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 32 + @.L1512 + %v =l loadl %ptr + jnz %v, @.L1513, @.L1514 + @.L1513 + %v =l 
loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -32 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 8 + @.L1515 + %v =l loadl %ptr + jnz %v, @.L1516, @.L1517 + @.L1516 + %ptr =l add %ptr, -8 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -48 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 48 + jmp @.L1515 + @.L1517 + %ptr =l add %ptr, -8 + @.L1518 + %v =l loadl %ptr + jnz %v, @.L1519, @.L1520 + @.L1519 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -8 + jmp @.L1518 + @.L1520 + %ptr =l add %ptr, 32 + jmp @.L1512 + @.L1514 + %ptr =l add %ptr, -24 + @.L1521 + %v =l loadl %ptr + jnz %v, @.L1522, @.L1523 + @.L1522 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 24 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -24 + jmp @.L1521 + @.L1523 + %ptr =l add %ptr, -8 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -72 + jmp @.L1506 + @.L1508 + %ptr =l add %ptr, 72 + @.L1524 + %v =l loadl %ptr + jnz %v, @.L1525, @.L1526 + @.L1525 + %ptr =l add %ptr, 32 + @.L1527 + %v =l loadl %ptr + jnz %v, @.L1528, @.L1529 + @.L1528 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -288 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 136 + %ptr =l add %ptr, 152 + jmp @.L1527 + @.L1529 + %ptr =l add %ptr, 40 + jmp @.L1524 + @.L1526 + %ptr =l add %ptr, -72 + @.L1530 + %v =l loadl %ptr + jnz %v, @.L1531, @.L1532 + @.L1531 + %ptr =l add %ptr, -72 + jmp @.L1530 + @.L1532 + %ptr =l add %ptr, 72 + %v =l loadl %ptr + %v =l add %v, 15 + storel %v, %ptr + @.L1533 + %v =l loadl %ptr + jnz %v, @.L1534, @.L1535 + @.L1534 + @.L1536 + %v =l loadl %ptr + jnz %v, @.L1537, @.L1538 + @.L1537 + 
%ptr =l add %ptr, 64 + %ptr =l add %ptr, 8 + jmp @.L1536 + @.L1538 + %ptr =l add %ptr, -72 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -72 + @.L1539 + %v =l loadl %ptr + jnz %v, @.L1540, @.L1541 + @.L1540 + %ptr =l add %ptr, -72 + jmp @.L1539 + @.L1541 + %ptr =l add %ptr, 72 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L1533 + @.L1535 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 168 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -24 + @.L1542 + %v =l loadl %ptr + jnz %v, @.L1543, @.L1544 + @.L1543 + %ptr =l add %ptr, -72 + jmp @.L1542 + @.L1544 + %ptr =l add %ptr, 72 + @.L1545 + %v =l loadl %ptr + jnz %v, @.L1546, @.L1547 + @.L1546 + %ptr =l add %ptr, 24 + @.L1548 + %v =l loadl %ptr + jnz %v, @.L1549, @.L1550 + @.L1549 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -24 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 24 + jmp @.L1548 + @.L1550 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -24 + @.L1551 + %v =l loadl %ptr + jnz %v, @.L1552, @.L1553 + @.L1552 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 24 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 8 + @.L1554 + %v =l loadl %ptr + jnz %v, @.L1555, @.L1556 + @.L1555 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -32 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 32 + jmp @.L1554 + @.L1556 + %ptr =l add %ptr, -32 + @.L1557 + %v =l loadl %ptr + jnz %v, @.L1558, @.L1559 + @.L1558 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 32 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -104 + @.L1560 + %v =l loadl %ptr + jnz %v, @.L1561, @.L1562 + @.L1561 + %ptr =l add %ptr, -72 + jmp @.L1560 + @.L1562 + %ptr =l add %ptr, 32 + @.L1563 + %v =l loadl %ptr + jnz %v, 
@.L1564, @.L1565 + @.L1564 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L1563 + @.L1565 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 40 + @.L1566 + %v =l loadl %ptr + jnz %v, @.L1567, @.L1568 + @.L1567 + %ptr =l add %ptr, 72 + jmp @.L1566 + @.L1568 + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -8 + jmp @.L1557 + @.L1559 + jmp @.L1551 + @.L1553 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 32 + @.L1569 + %v =l loadl %ptr + jnz %v, @.L1570, @.L1571 + @.L1570 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -32 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 32 + jmp @.L1569 + @.L1571 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -32 + @.L1572 + %v =l loadl %ptr + jnz %v, @.L1573, @.L1574 + @.L1573 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 32 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -8 + @.L1575 + %v =l loadl %ptr + jnz %v, @.L1576, @.L1577 + @.L1576 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -24 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 24 + jmp @.L1575 + @.L1577 + %ptr =l add %ptr, -24 + @.L1578 + %v =l loadl %ptr + jnz %v, @.L1579, @.L1580 + @.L1579 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 24 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -8 + %ptr =l add %ptr, -88 + @.L1581 + %v =l loadl %ptr + jnz %v, @.L1582, @.L1583 + @.L1582 + %ptr =l add %ptr, -72 + jmp @.L1581 + @.L1583 + %ptr =l add %ptr, 24 + @.L1584 + %v =l loadl %ptr + jnz %v, @.L1585, @.L1586 + @.L1585 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L1584 + @.L1586 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 48 + @.L1587 + %v =l loadl %ptr + jnz %v, 
@.L1588, @.L1589 + @.L1588 + %ptr =l add %ptr, 72 + jmp @.L1587 + @.L1589 + %ptr =l add %ptr, 8 + @.L1590 + %v =l loadl %ptr + jnz %v, @.L1591, @.L1592 + @.L1591 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L1590 + @.L1592 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -8 + jmp @.L1578 + @.L1580 + jmp @.L1572 + @.L1574 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 8 + @.L1593 + %v =l loadl %ptr + jnz %v, @.L1594, @.L1595 + @.L1594 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -8 + @.L1596 + %v =l loadl %ptr + jnz %v, @.L1597, @.L1598 + @.L1597 + %ptr =l add %ptr, 72 + jmp @.L1596 + @.L1598 + %ptr =l add %ptr, -64 + jmp @.L1593 + @.L1595 + %ptr =l add %ptr, 8 + %ptr =l add %ptr, 56 + jmp @.L1545 + @.L1547 + %ptr =l add %ptr, -72 + @.L1599 + %v =l loadl %ptr + jnz %v, @.L1600, @.L1601 + @.L1600 + %ptr =l add %ptr, -72 + jmp @.L1599 + @.L1601 + %ptr =l add %ptr, 16 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 16 + @.L1602 + %v =l loadl %ptr + jnz %v, @.L1603, @.L1604 + @.L1603 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -32 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 32 + jmp @.L1602 + @.L1604 + %ptr =l add %ptr, -32 + @.L1605 + %v =l loadl %ptr + jnz %v, @.L1606, @.L1607 + @.L1606 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 32 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -16 + @.L1608 + %v =l loadl %ptr + jnz %v, @.L1609, @.L1610 + @.L1609 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L1608 + @.L1610 + %ptr =l add %ptr, -16 + jmp @.L1605 + @.L1607 + %ptr =l add %ptr, 16 + jmp @.L342 + @.L344 + %ptr =l add %ptr, -16 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 32 + @.L1611 + %v =l loadl %ptr + jnz %v, @.L1612, @.L1613 + @.L1612 + %v =l loadl %ptr + %v =l add %v, 
-1 + storel %v, %ptr + %ptr =l add %ptr, -32 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 32 + jmp @.L1611 + @.L1613 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -32 + @.L1614 + %v =l loadl %ptr + jnz %v, @.L1615, @.L1616 + @.L1615 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 32 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -48 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 16 + jmp @.L1614 + @.L1616 + %ptr =l add %ptr, 32 + @.L1617 + %v =l loadl %ptr + jnz %v, @.L1618, @.L1619 + @.L1618 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -56 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 56 + jmp @.L1617 + @.L1619 + %ptr =l add %ptr, -24 + @.L1620 + %v =l loadl %ptr + jnz %v, @.L1621, @.L1622 + @.L1621 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L1620 + @.L1622 + %ptr =l add %ptr, 8 + @.L1623 + %v =l loadl %ptr + jnz %v, @.L1624, @.L1625 + @.L1624 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L1623 + @.L1625 + %ptr =l add %ptr, 8 + @.L1626 + %v =l loadl %ptr + jnz %v, @.L1627, @.L1628 + @.L1627 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L1626 + @.L1628 + %ptr =l add %ptr, 8 + @.L1629 + %v =l loadl %ptr + jnz %v, @.L1630, @.L1631 + @.L1630 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L1629 + @.L1631 + %ptr =l add %ptr, 8 + @.L1632 + %v =l loadl %ptr + jnz %v, @.L1633, @.L1634 + @.L1633 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L1632 + @.L1634 + %ptr =l add %ptr, 8 + @.L1635 + %v =l loadl %ptr + jnz %v, @.L1636, @.L1637 + @.L1636 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L1635 + @.L1637 + %ptr =l add %ptr, 24 + @.L1638 + %v =l loadl %ptr + jnz %v, @.L1639, @.L1640 + @.L1639 + %ptr =l add %ptr, 8 + @.L1641 + %v =l loadl %ptr + jnz %v, @.L1642, @.L1643 + @.L1642 + 
%v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L1641 + @.L1643 + %ptr =l add %ptr, 8 + @.L1644 + %v =l loadl %ptr + jnz %v, @.L1645, @.L1646 + @.L1645 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L1644 + @.L1646 + %ptr =l add %ptr, 8 + @.L1647 + %v =l loadl %ptr + jnz %v, @.L1648, @.L1649 + @.L1648 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L1647 + @.L1649 + %ptr =l add %ptr, 8 + @.L1650 + %v =l loadl %ptr + jnz %v, @.L1651, @.L1652 + @.L1651 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L1650 + @.L1652 + %ptr =l add %ptr, 8 + @.L1653 + %v =l loadl %ptr + jnz %v, @.L1654, @.L1655 + @.L1654 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L1653 + @.L1655 + %ptr =l add %ptr, 8 + @.L1656 + %v =l loadl %ptr + jnz %v, @.L1657, @.L1658 + @.L1657 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L1656 + @.L1658 + %ptr =l add %ptr, 24 + jmp @.L1638 + @.L1640 + %ptr =l add %ptr, -72 + @.L1659 + %v =l loadl %ptr + jnz %v, @.L1660, @.L1661 + @.L1660 + %ptr =l add %ptr, -72 + jmp @.L1659 + @.L1661 + %ptr =l add %ptr, 72 + @.L1662 + %v =l loadl %ptr + jnz %v, @.L1663, @.L1664 + @.L1663 + %ptr =l add %ptr, 40 + @.L1665 + %v =l loadl %ptr + jnz %v, @.L1666, @.L1667 + @.L1666 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L1665 + @.L1667 + %ptr =l add %ptr, 32 + jmp @.L1662 + @.L1664 + %ptr =l add %ptr, -72 + @.L1668 + %v =l loadl %ptr + jnz %v, @.L1669, @.L1670 + @.L1669 + %ptr =l add %ptr, -72 + jmp @.L1668 + @.L1670 + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, 11 + storel %v, %ptr + @.L1671 + %v =l loadl %ptr + jnz %v, @.L1672, @.L1673 + @.L1672 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + @.L1674 + %v =l loadl %ptr + jnz %v, @.L1675, @.L1676 + @.L1675 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 72 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -72 + jmp @.L1674 + @.L1676 + 
%ptr =l add %ptr, 72 + jmp @.L1671 + @.L1673 + %ptr =l add %ptr, 32 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 72 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -64 + %ptr =l add %ptr, -48 + @.L1677 + %v =l loadl %ptr + jnz %v, @.L1678, @.L1679 + @.L1678 + %ptr =l add %ptr, -72 + jmp @.L1677 + @.L1679 + %ptr =l add %ptr, 56 + @.L1680 + %v =l loadl %ptr + jnz %v, @.L1681, @.L1682 + @.L1681 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -56 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 56 + jmp @.L1680 + @.L1682 + %ptr =l add %ptr, -56 + @.L1683 + %v =l loadl %ptr + jnz %v, @.L1684, @.L1685 + @.L1684 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 56 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + @.L1686 + %v =l loadl %ptr + jnz %v, @.L1687, @.L1688 + @.L1687 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L1686 + @.L1688 + %ptr =l add %ptr, 16 + @.L1689 + %v =l loadl %ptr + jnz %v, @.L1690, @.L1691 + @.L1690 + %ptr =l add %ptr, 72 + jmp @.L1689 + @.L1691 + %ptr =l add %ptr, -40 + %ptr =l add %ptr, -32 + @.L1692 + %v =l loadl %ptr + jnz %v, @.L1693, @.L1694 + @.L1693 + %ptr =l add %ptr, 56 + @.L1695 + %v =l loadl %ptr + jnz %v, @.L1696, @.L1697 + @.L1696 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -48 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 48 + jmp @.L1695 + @.L1697 + %ptr =l add %ptr, -48 + @.L1698 + %v =l loadl %ptr + jnz %v, @.L1699, @.L1700 + @.L1699 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 48 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -56 + @.L1701 + %v =l loadl %ptr + jnz %v, @.L1702, @.L1703 + @.L1702 + %ptr =l add %ptr, -72 + jmp @.L1701 + @.L1703 + %ptr =l add %ptr, 56 + @.L1704 + %v =l loadl %ptr + jnz %v, @.L1705, @.L1706 + @.L1705 + %v =l loadl %ptr + %v 
=l add %v, -1 + storel %v, %ptr + jmp @.L1704 + @.L1706 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 24 + jmp @.L1698 + @.L1700 + %ptr =l add %ptr, -32 + %ptr =l add %ptr, -48 + jmp @.L1692 + @.L1694 + jmp @.L1683 + @.L1685 + %ptr =l add %ptr, 56 + @.L1707 + %v =l loadl %ptr + jnz %v, @.L1708, @.L1709 + @.L1708 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -56 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 56 + jmp @.L1707 + @.L1709 + %ptr =l add %ptr, -56 + @.L1710 + %v =l loadl %ptr + jnz %v, @.L1711, @.L1712 + @.L1711 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 56 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 16 + @.L1713 + %v =l loadl %ptr + jnz %v, @.L1714, @.L1715 + @.L1714 + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 32 + @.L1716 + %v =l loadl %ptr + jnz %v, @.L1717, @.L1718 + @.L1717 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -32 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 32 + jmp @.L1716 + @.L1718 + %ptr =l add %ptr, -32 + @.L1719 + %v =l loadl %ptr + jnz %v, @.L1720, @.L1721 + @.L1720 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 24 + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -32 + jmp @.L1719 + @.L1721 + %ptr =l add %ptr, 64 + jmp @.L1713 + @.L1715 + %ptr =l add %ptr, -16 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -56 + @.L1722 + %v =l loadl %ptr + jnz %v, @.L1723, @.L1724 + @.L1723 + %ptr =l add %ptr, 40 + @.L1725 + %v =l loadl %ptr + jnz %v, @.L1726, @.L1727 + @.L1726 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 16 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -16 + jmp @.L1725 + @.L1727 + %ptr =l add %ptr, -112 + jmp 
@.L1722 + @.L1724 + %ptr =l add %ptr, 72 + @.L1728 + %v =l loadl %ptr + jnz %v, @.L1729, @.L1730 + @.L1729 + %ptr =l add %ptr, 72 + jmp @.L1728 + @.L1730 + %ptr =l add %ptr, -40 + %ptr =l add %ptr, -32 + @.L1731 + %v =l loadl %ptr + jnz %v, @.L1732, @.L1733 + @.L1732 + %ptr =l add %ptr, 8 + @.L1734 + %v =l loadl %ptr + jnz %v, @.L1735, @.L1736 + @.L1735 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L1734 + @.L1736 + %ptr =l add %ptr, -8 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 56 + @.L1737 + %v =l loadl %ptr + jnz %v, @.L1738, @.L1739 + @.L1738 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -56 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 8 + @.L1740 + %v =l loadl %ptr + jnz %v, @.L1741, @.L1742 + @.L1741 + %ptr =l add %ptr, -8 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -24 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 24 + jmp @.L1740 + @.L1742 + %ptr =l add %ptr, -8 + @.L1743 + %v =l loadl %ptr + jnz %v, @.L1744, @.L1745 + @.L1744 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -8 + jmp @.L1743 + @.L1745 + %ptr =l add %ptr, 56 + jmp @.L1737 + @.L1739 + %ptr =l add %ptr, -48 + @.L1746 + %v =l loadl %ptr + jnz %v, @.L1747, @.L1748 + @.L1747 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 48 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -48 + jmp @.L1746 + @.L1748 + %ptr =l add %ptr, -8 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -72 + jmp @.L1731 + @.L1733 + %ptr =l add %ptr, 56 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -32 + @.L1749 + %v =l loadl %ptr + jnz %v, @.L1750, @.L1751 + @.L1750 + %v 
=l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L1749 + @.L1751 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -24 + jmp @.L1710 + @.L1712 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 56 + @.L1752 + %v =l loadl %ptr + jnz %v, @.L1753, @.L1754 + @.L1753 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -56 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 56 + jmp @.L1752 + @.L1754 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -56 + @.L1755 + %v =l loadl %ptr + jnz %v, @.L1756, @.L1757 + @.L1756 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 56 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 16 + @.L1758 + %v =l loadl %ptr + jnz %v, @.L1759, @.L1760 + @.L1759 + %ptr =l add %ptr, 16 + %ptr =l add %ptr, 24 + @.L1761 + %v =l loadl %ptr + jnz %v, @.L1762, @.L1763 + @.L1762 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 16 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -16 + jmp @.L1761 + @.L1763 + %ptr =l add %ptr, 32 + jmp @.L1758 + @.L1760 + %ptr =l add %ptr, -72 + @.L1764 + %v =l loadl %ptr + jnz %v, @.L1765, @.L1766 + @.L1765 + %ptr =l add %ptr, 8 + @.L1767 + %v =l loadl %ptr + jnz %v, @.L1768, @.L1769 + @.L1768 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L1767 + @.L1769 + %ptr =l add %ptr, -8 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 56 + @.L1770 + %v =l loadl %ptr + jnz %v, @.L1771, @.L1772 + @.L1771 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -56 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 8 + @.L1773 + %v =l loadl %ptr + jnz %v, @.L1774, @.L1775 + @.L1774 + %ptr =l add %ptr, -8 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, 
-1 + storel %v, %ptr + %ptr =l add %ptr, -24 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 24 + jmp @.L1773 + @.L1775 + %ptr =l add %ptr, -8 + @.L1776 + %v =l loadl %ptr + jnz %v, @.L1777, @.L1778 + @.L1777 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -8 + jmp @.L1776 + @.L1778 + %ptr =l add %ptr, 56 + jmp @.L1770 + @.L1772 + %ptr =l add %ptr, -16 + %ptr =l add %ptr, -32 + @.L1779 + %v =l loadl %ptr + jnz %v, @.L1780, @.L1781 + @.L1780 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 48 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -48 + jmp @.L1779 + @.L1781 + %ptr =l add %ptr, -8 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -72 + jmp @.L1764 + @.L1766 + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, 5 + storel %v, %ptr + @.L1782 + %v =l loadl %ptr + jnz %v, @.L1783, @.L1784 + @.L1783 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + @.L1785 + %v =l loadl %ptr + jnz %v, @.L1786, @.L1787 + @.L1786 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 72 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -72 + jmp @.L1785 + @.L1787 + %ptr =l add %ptr, 72 + jmp @.L1782 + @.L1784 + %ptr =l add %ptr, 32 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -24 + %ptr =l add %ptr, -16 + @.L1788 + %v =l loadl %ptr + jnz %v, @.L1789, @.L1790 + @.L1789 + %ptr =l add %ptr, -72 + jmp @.L1788 + @.L1790 + %ptr =l add %ptr, 72 + @.L1791 + %v =l loadl %ptr + jnz %v, @.L1792, @.L1793 + @.L1792 + %ptr =l add %ptr, 40 + @.L1794 + %v =l loadl %ptr + jnz %v, @.L1795, @.L1796 + @.L1795 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -40 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 40 + jmp @.L1794 + @.L1796 + %v =l loadl %ptr 
+ %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -40 + @.L1797 + %v =l loadl %ptr + jnz %v, @.L1798, @.L1799 + @.L1798 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 40 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 16 + @.L1800 + %v =l loadl %ptr + jnz %v, @.L1801, @.L1802 + @.L1801 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -56 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 56 + jmp @.L1800 + @.L1802 + %ptr =l add %ptr, -32 + %ptr =l add %ptr, -24 + @.L1803 + %v =l loadl %ptr + jnz %v, @.L1804, @.L1805 + @.L1804 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 56 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -128 + @.L1806 + %v =l loadl %ptr + jnz %v, @.L1807, @.L1808 + @.L1807 + %ptr =l add %ptr, -72 + jmp @.L1806 + @.L1808 + %ptr =l add %ptr, 32 + @.L1809 + %v =l loadl %ptr + jnz %v, @.L1810, @.L1811 + @.L1810 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L1809 + @.L1811 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 40 + @.L1812 + %v =l loadl %ptr + jnz %v, @.L1813, @.L1814 + @.L1813 + %ptr =l add %ptr, 72 + jmp @.L1812 + @.L1814 + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -8 + jmp @.L1803 + @.L1805 + jmp @.L1797 + @.L1799 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 56 + @.L1815 + %v =l loadl %ptr + jnz %v, @.L1816, @.L1817 + @.L1816 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -8 + %ptr =l add %ptr, -48 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 56 + jmp @.L1815 + @.L1817 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -56 + @.L1818 + %v =l loadl %ptr + jnz %v, @.L1819, @.L1820 + @.L1819 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 
56 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -16 + @.L1821 + %v =l loadl %ptr + jnz %v, @.L1822, @.L1823 + @.L1822 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -40 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 40 + jmp @.L1821 + @.L1823 + %ptr =l add %ptr, -40 + @.L1824 + %v =l loadl %ptr + jnz %v, @.L1825, @.L1826 + @.L1825 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 40 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -112 + @.L1827 + %v =l loadl %ptr + jnz %v, @.L1828, @.L1829 + @.L1828 + %ptr =l add %ptr, -24 + %ptr =l add %ptr, -48 + jmp @.L1827 + @.L1829 + %ptr =l add %ptr, 24 + @.L1830 + %v =l loadl %ptr + jnz %v, @.L1831, @.L1832 + @.L1831 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L1830 + @.L1832 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 48 + @.L1833 + %v =l loadl %ptr + jnz %v, @.L1834, @.L1835 + @.L1834 + %ptr =l add %ptr, 72 + jmp @.L1833 + @.L1835 + %ptr =l add %ptr, 8 + @.L1836 + %v =l loadl %ptr + jnz %v, @.L1837, @.L1838 + @.L1837 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L1836 + @.L1838 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -8 + jmp @.L1824 + @.L1826 + jmp @.L1818 + @.L1820 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 8 + @.L1839 + %v =l loadl %ptr + jnz %v, @.L1840, @.L1841 + @.L1840 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -8 + @.L1842 + %v =l loadl %ptr + jnz %v, @.L1843, @.L1844 + @.L1843 + %ptr =l add %ptr, 72 + jmp @.L1842 + @.L1844 + %ptr =l add %ptr, -64 + jmp @.L1839 + @.L1841 + %ptr =l add %ptr, 64 + jmp @.L1791 + @.L1793 + %ptr =l add %ptr, -56 + %ptr =l add %ptr, -16 + @.L1845 + %v =l loadl %ptr + jnz %v, @.L1846, @.L1847 + @.L1846 + %ptr =l add %ptr, -72 + jmp @.L1845 + @.L1847 + %ptr =l add %ptr, 32 + @.L1848 + 
%v =l loadl %ptr + jnz %v, @.L1849, @.L1850 + @.L1849 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L1848 + @.L1850 + %ptr =l add %ptr, -24 + %v =l loadl %ptr + %v =l add %v, 5 + storel %v, %ptr + @.L1851 + %v =l loadl %ptr + jnz %v, @.L1852, @.L1853 + @.L1852 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + @.L1854 + %v =l loadl %ptr + jnz %v, @.L1855, @.L1856 + @.L1855 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 72 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -72 + jmp @.L1854 + @.L1856 + %ptr =l add %ptr, 72 + jmp @.L1851 + @.L1853 + %ptr =l add %ptr, 32 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -40 + @.L1857 + %v =l loadl %ptr + jnz %v, @.L1858, @.L1859 + @.L1858 + %ptr =l add %ptr, -56 + %ptr =l add %ptr, -16 + jmp @.L1857 + @.L1859 + jmp @.L1755 + @.L1757 + %ptr =l add %ptr, 24 + jmp @.L66 + @.L68 + %ptr =l add %ptr, -32 + %v =l loadl %ptr + %r =l call $putchar(l %v) + %ptr =l add %ptr, 80 + @.L1860 + %v =l loadl %ptr + jnz %v, @.L1861, @.L1862 + @.L1861 + %ptr =l add %ptr, 48 + @.L1863 + %v =l loadl %ptr + jnz %v, @.L1864, @.L1865 + @.L1864 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L1863 + @.L1865 + %ptr =l add %ptr, 24 + jmp @.L1860 + @.L1862 + %ptr =l add %ptr, -72 + @.L1866 + %v =l loadl %ptr + jnz %v, @.L1867, @.L1868 + @.L1867 + %ptr =l add %ptr, -72 + jmp @.L1866 + @.L1868 + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, 10 + storel %v, %ptr + @.L1869 + %v =l loadl %ptr + jnz %v, @.L1870, @.L1871 + @.L1870 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + @.L1872 + %v =l loadl %ptr + jnz %v, @.L1873, @.L1874 + @.L1873 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 64 + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -72 + jmp @.L1872 + @.L1874 + %ptr =l add %ptr, 72 + jmp @.L1869 + @.L1871 + %ptr =l add %ptr, 40 + %v =l 
loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 72 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -120 + @.L1875 + %v =l loadl %ptr + jnz %v, @.L1876, @.L1877 + @.L1876 + %ptr =l add %ptr, -72 + jmp @.L1875 + @.L1877 + %ptr =l add %ptr, 64 + @.L1878 + %v =l loadl %ptr + jnz %v, @.L1879, @.L1880 + @.L1879 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -48 + %ptr =l add %ptr, -16 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 64 + jmp @.L1878 + @.L1880 + %ptr =l add %ptr, -64 + @.L1881 + %v =l loadl %ptr + jnz %v, @.L1882, @.L1883 + @.L1882 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 64 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + @.L1884 + %v =l loadl %ptr + jnz %v, @.L1885, @.L1886 + @.L1885 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L1884 + @.L1886 + %ptr =l add %ptr, 8 + @.L1887 + %v =l loadl %ptr + jnz %v, @.L1888, @.L1889 + @.L1888 + %ptr =l add %ptr, 72 + jmp @.L1887 + @.L1889 + %ptr =l add %ptr, -72 + @.L1890 + %v =l loadl %ptr + jnz %v, @.L1891, @.L1892 + @.L1891 + %ptr =l add %ptr, 64 + @.L1893 + %v =l loadl %ptr + jnz %v, @.L1894, @.L1895 + @.L1894 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -56 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 48 + %ptr =l add %ptr, 8 + jmp @.L1893 + @.L1895 + %ptr =l add %ptr, -56 + @.L1896 + %v =l loadl %ptr + jnz %v, @.L1897, @.L1898 + @.L1897 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 56 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -64 + @.L1899 + %v =l loadl %ptr + jnz %v, @.L1900, @.L1901 + @.L1900 + %ptr =l add %ptr, -72 + jmp @.L1899 + @.L1901 + %ptr =l add %ptr, 64 + @.L1902 + %v =l loadl %ptr + jnz %v, @.L1903, @.L1904 + @.L1903 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L1902 + @.L1904 + %v =l loadl %ptr + 
%v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 16 + jmp @.L1896 + @.L1898 + %ptr =l add %ptr, -80 + jmp @.L1890 + @.L1892 + jmp @.L1881 + @.L1883 + %ptr =l add %ptr, 64 + @.L1905 + %v =l loadl %ptr + jnz %v, @.L1906, @.L1907 + @.L1906 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -40 + %ptr =l add %ptr, -24 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 64 + jmp @.L1905 + @.L1907 + %ptr =l add %ptr, -64 + @.L1908 + %v =l loadl %ptr + jnz %v, @.L1909, @.L1910 + @.L1909 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 64 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 8 + @.L1911 + %v =l loadl %ptr + jnz %v, @.L1912, @.L1913 + @.L1912 + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 40 + @.L1914 + %v =l loadl %ptr + jnz %v, @.L1915, @.L1916 + @.L1915 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -40 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 40 + jmp @.L1914 + @.L1916 + %ptr =l add %ptr, -40 + @.L1917 + %v =l loadl %ptr + jnz %v, @.L1918, @.L1919 + @.L1918 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 40 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -40 + jmp @.L1917 + @.L1919 + %ptr =l add %ptr, 48 + %ptr =l add %ptr, 16 + jmp @.L1911 + @.L1913 + %ptr =l add %ptr, -8 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -64 + @.L1920 + %v =l loadl %ptr + jnz %v, @.L1921, @.L1922 + @.L1921 + %ptr =l add %ptr, 48 + @.L1923 + %v =l loadl %ptr + jnz %v, @.L1924, @.L1925 + @.L1924 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 16 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -16 + jmp @.L1923 + @.L1925 + %ptr =l add %ptr, -120 + jmp @.L1920 + @.L1922 + %ptr =l add %ptr, 72 + @.L1926 + %v =l loadl %ptr + jnz 
%v, @.L1927, @.L1928 + @.L1927 + %ptr =l add %ptr, 72 + jmp @.L1926 + @.L1928 + %ptr =l add %ptr, -72 + @.L1929 + %v =l loadl %ptr + jnz %v, @.L1930, @.L1931 + @.L1930 + %ptr =l add %ptr, 8 + @.L1932 + %v =l loadl %ptr + jnz %v, @.L1933, @.L1934 + @.L1933 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L1932 + @.L1934 + %ptr =l add %ptr, -8 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 64 + @.L1935 + %v =l loadl %ptr + jnz %v, @.L1936, @.L1937 + @.L1936 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -64 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 8 + @.L1938 + %v =l loadl %ptr + jnz %v, @.L1939, @.L1940 + @.L1939 + %ptr =l add %ptr, -8 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -16 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 16 + jmp @.L1938 + @.L1940 + %ptr =l add %ptr, -8 + @.L1941 + %v =l loadl %ptr + jnz %v, @.L1942, @.L1943 + @.L1942 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -8 + jmp @.L1941 + @.L1943 + %ptr =l add %ptr, 64 + jmp @.L1935 + @.L1937 + %ptr =l add %ptr, -56 + @.L1944 + %v =l loadl %ptr + jnz %v, @.L1945, @.L1946 + @.L1945 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 56 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -56 + jmp @.L1944 + @.L1946 + %ptr =l add %ptr, -8 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -48 + %ptr =l add %ptr, -24 + jmp @.L1929 + @.L1931 + %ptr =l add %ptr, 64 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -40 + @.L1947 + %v =l loadl %ptr + jnz %v, @.L1948, @.L1949 + @.L1948 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L1947 + @.L1949 + 
%v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -24 + jmp @.L1908 + @.L1910 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 64 + @.L1950 + %v =l loadl %ptr + jnz %v, @.L1951, @.L1952 + @.L1951 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -64 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 64 + jmp @.L1950 + @.L1952 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -64 + @.L1953 + %v =l loadl %ptr + jnz %v, @.L1954, @.L1955 + @.L1954 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 64 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 8 + @.L1956 + %v =l loadl %ptr + jnz %v, @.L1957, @.L1958 + @.L1957 + %ptr =l add %ptr, 24 + %ptr =l add %ptr, 24 + @.L1959 + %v =l loadl %ptr + jnz %v, @.L1960, @.L1961 + @.L1960 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 16 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -16 + jmp @.L1959 + @.L1961 + %ptr =l add %ptr, 24 + jmp @.L1956 + @.L1958 + %ptr =l add %ptr, -72 + @.L1962 + %v =l loadl %ptr + jnz %v, @.L1963, @.L1964 + @.L1963 + %ptr =l add %ptr, 8 + @.L1965 + %v =l loadl %ptr + jnz %v, @.L1966, @.L1967 + @.L1966 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L1965 + @.L1967 + %ptr =l add %ptr, -8 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 64 + @.L1968 + %v =l loadl %ptr + jnz %v, @.L1969, @.L1970 + @.L1969 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -64 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 8 + @.L1971 + %v =l loadl %ptr + jnz %v, @.L1972, @.L1973 + @.L1972 + %ptr =l add %ptr, -8 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -16 + %v =l loadl %ptr + %v =l add 
%v, 1 + storel %v, %ptr + %ptr =l add %ptr, 16 + jmp @.L1971 + @.L1973 + %ptr =l add %ptr, -8 + @.L1974 + %v =l loadl %ptr + jnz %v, @.L1975, @.L1976 + @.L1975 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -8 + jmp @.L1974 + @.L1976 + %ptr =l add %ptr, 64 + jmp @.L1968 + @.L1970 + %ptr =l add %ptr, -16 + %ptr =l add %ptr, -40 + @.L1977 + %v =l loadl %ptr + jnz %v, @.L1978, @.L1979 + @.L1978 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 56 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -56 + jmp @.L1977 + @.L1979 + %ptr =l add %ptr, -8 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -72 + jmp @.L1962 + @.L1964 + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, 5 + storel %v, %ptr + @.L1980 + %v =l loadl %ptr + jnz %v, @.L1981, @.L1982 + @.L1981 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + @.L1983 + %v =l loadl %ptr + jnz %v, @.L1984, @.L1985 + @.L1984 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 72 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -72 + jmp @.L1983 + @.L1985 + %ptr =l add %ptr, 72 + jmp @.L1980 + @.L1982 + %ptr =l add %ptr, 40 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 216 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -48 + @.L1986 + %v =l loadl %ptr + jnz %v, @.L1987, @.L1988 + @.L1987 + %ptr =l add %ptr, -72 + jmp @.L1986 + @.L1988 + %ptr =l add %ptr, 72 + @.L1989 + %v =l loadl %ptr + jnz %v, @.L1990, @.L1991 + @.L1990 + %ptr =l add %ptr, 48 + @.L1992 + %v =l loadl %ptr + jnz %v, @.L1993, @.L1994 + @.L1993 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -48 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 48 + jmp @.L1992 + @.L1994 + %v =l loadl %ptr + %v =l add %v, 1 + 
storel %v, %ptr + %ptr =l add %ptr, -8 + %ptr =l add %ptr, -40 + @.L1995 + %v =l loadl %ptr + jnz %v, @.L1996, @.L1997 + @.L1996 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 48 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 16 + @.L1998 + %v =l loadl %ptr + jnz %v, @.L1999, @.L2000 + @.L1999 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -64 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 64 + jmp @.L1998 + @.L2000 + %ptr =l add %ptr, -64 + @.L2001 + %v =l loadl %ptr + jnz %v, @.L2002, @.L2003 + @.L2002 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 64 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -136 + @.L2004 + %v =l loadl %ptr + jnz %v, @.L2005, @.L2006 + @.L2005 + %ptr =l add %ptr, -56 + %ptr =l add %ptr, -16 + jmp @.L2004 + @.L2006 + %ptr =l add %ptr, 32 + @.L2007 + %v =l loadl %ptr + jnz %v, @.L2008, @.L2009 + @.L2008 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L2007 + @.L2009 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 40 + @.L2010 + %v =l loadl %ptr + jnz %v, @.L2011, @.L2012 + @.L2011 + %ptr =l add %ptr, 72 + jmp @.L2010 + @.L2012 + %ptr =l add %ptr, 8 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -8 + jmp @.L2001 + @.L2003 + jmp @.L1995 + @.L1997 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 64 + @.L2013 + %v =l loadl %ptr + jnz %v, @.L2014, @.L2015 + @.L2014 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -64 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 64 + jmp @.L2013 + @.L2015 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -64 + @.L2016 + %v =l loadl %ptr + jnz %v, @.L2017, @.L2018 + @.L2017 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 64 + %v =l loadl %ptr 
+ %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -16 + @.L2019 + %v =l loadl %ptr + jnz %v, @.L2020, @.L2021 + @.L2020 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -48 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 48 + jmp @.L2019 + @.L2021 + %ptr =l add %ptr, -48 + @.L2022 + %v =l loadl %ptr + jnz %v, @.L2023, @.L2024 + @.L2023 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 48 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -120 + @.L2025 + %v =l loadl %ptr + jnz %v, @.L2026, @.L2027 + @.L2026 + %ptr =l add %ptr, -72 + jmp @.L2025 + @.L2027 + %ptr =l add %ptr, 24 + @.L2028 + %v =l loadl %ptr + jnz %v, @.L2029, @.L2030 + @.L2029 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L2028 + @.L2030 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 48 + @.L2031 + %v =l loadl %ptr + jnz %v, @.L2032, @.L2033 + @.L2032 + %ptr =l add %ptr, 48 + %ptr =l add %ptr, 24 + jmp @.L2031 + @.L2033 + %ptr =l add %ptr, 8 + @.L2034 + %v =l loadl %ptr + jnz %v, @.L2035, @.L2036 + @.L2035 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L2034 + @.L2036 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -8 + jmp @.L2022 + @.L2024 + jmp @.L2016 + @.L2018 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, 8 + @.L2037 + %v =l loadl %ptr + jnz %v, @.L2038, @.L2039 + @.L2038 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -8 + @.L2040 + %v =l loadl %ptr + jnz %v, @.L2041, @.L2042 + @.L2041 + %ptr =l add %ptr, 72 + jmp @.L2040 + @.L2042 + %ptr =l add %ptr, -64 + jmp @.L2037 + @.L2039 + %ptr =l add %ptr, 64 + jmp @.L1989 + @.L1991 + %ptr =l add %ptr, -72 + @.L2043 + %v =l loadl %ptr + jnz %v, @.L2044, @.L2045 + @.L2044 + %ptr =l add %ptr, -72 + jmp @.L2043 + @.L2045 + %ptr =l add %ptr, 32 + @.L2046 + %v =l loadl %ptr + jnz %v, @.L2047, @.L2048 + 
@.L2047 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + jmp @.L2046 + @.L2048 + %ptr =l add %ptr, -24 + %v =l loadl %ptr + %v =l add %v, 4 + storel %v, %ptr + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + @.L2049 + %v =l loadl %ptr + jnz %v, @.L2050, @.L2051 + @.L2050 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + @.L2052 + %v =l loadl %ptr + jnz %v, @.L2053, @.L2054 + @.L2053 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 72 + %v =l loadl %ptr + %v =l add %v, 1 + storel %v, %ptr + %ptr =l add %ptr, -72 + jmp @.L2052 + @.L2054 + %ptr =l add %ptr, 72 + jmp @.L2049 + @.L2051 + %ptr =l add %ptr, 40 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, 216 + %v =l loadl %ptr + %v =l add %v, -1 + storel %v, %ptr + %ptr =l add %ptr, -48 + @.L2055 + %v =l loadl %ptr + jnz %v, @.L2056, @.L2057 + @.L2056 + %ptr =l add %ptr, -32 + %ptr =l add %ptr, -40 + jmp @.L2055 + @.L2057 + jmp @.L1953 + @.L1955 + %ptr =l add %ptr, 24 + jmp @.L36 + @.L38 + @end + ret 0 +} diff --git a/src/qbe/test/_chacha20.ssa b/src/qbe/test/_chacha20.ssa new file mode 100644 index 00000000..9b257452 --- /dev/null +++ b/src/qbe/test/_chacha20.ssa @@ -0,0 +1,233 @@ +export function $chacha20_rounds_qbe(l %out, l %in) { +@start + %t0 =w loadw %in + %in =l add %in, 4 + %t1 =w loadw %in + %in =l add %in, 4 + %t2 =w loadw %in + %in =l add %in, 4 + %t3 =w loadw %in + %in =l add %in, 4 + %t4 =w loadw %in + %in =l add %in, 4 + %t5 =w loadw %in + %in =l add %in, 4 + %t6 =w loadw %in + %in =l add %in, 4 + %t7 =w loadw %in + %in =l add %in, 4 + %t8 =w loadw %in + %in =l add %in, 4 + %t9 =w loadw %in + %in =l add %in, 4 + %t10 =w loadw %in + %in =l add %in, 4 + %t11 =w loadw %in + %in =l add %in, 4 + %t12 =w loadw %in + %in =l add %in, 4 + %t13 =w loadw %in + %in =l add %in, 4 + %t14 =w loadw %in + %in =l add %in, 4 + %t15 =w loadw %in + %in =l add %in, 4 + %counter =w copy 10 +@loop + %t0 =w add %t0, %t4 + %t12 =w xor %t12, %t0 + 
%rotl32_a =w shl %t12, 16 + %rotl32_b =w shr %t12, 16 + %t12 =w xor %rotl32_a, %rotl32_b + %t8 =w add %t8, %t12 + %t4 =w xor %t4, %t8 + %rotl32_a =w shl %t4, 12 + %rotl32_b =w shr %t4, 20 + %t4 =w xor %rotl32_a, %rotl32_b + %t0 =w add %t0, %t4 + %t12 =w xor %t12, %t0 + %rotl32_a =w shl %t12, 8 + %rotl32_b =w shr %t12, 24 + %t12 =w xor %rotl32_a, %rotl32_b + %t8 =w add %t8, %t12 + %t4 =w xor %t4, %t8 + %rotl32_a =w shl %t4, 7 + %rotl32_b =w shr %t4, 25 + %t4 =w xor %rotl32_a, %rotl32_b + %t1 =w add %t1, %t5 + %t13 =w xor %t13, %t1 + %rotl32_a =w shl %t13, 16 + %rotl32_b =w shr %t13, 16 + %t13 =w xor %rotl32_a, %rotl32_b + %t9 =w add %t9, %t13 + %t5 =w xor %t5, %t9 + %rotl32_a =w shl %t5, 12 + %rotl32_b =w shr %t5, 20 + %t5 =w xor %rotl32_a, %rotl32_b + %t1 =w add %t1, %t5 + %t13 =w xor %t13, %t1 + %rotl32_a =w shl %t13, 8 + %rotl32_b =w shr %t13, 24 + %t13 =w xor %rotl32_a, %rotl32_b + %t9 =w add %t9, %t13 + %t5 =w xor %t5, %t9 + %rotl32_a =w shl %t5, 7 + %rotl32_b =w shr %t5, 25 + %t5 =w xor %rotl32_a, %rotl32_b + %t2 =w add %t2, %t6 + %t14 =w xor %t14, %t2 + %rotl32_a =w shl %t14, 16 + %rotl32_b =w shr %t14, 16 + %t14 =w xor %rotl32_a, %rotl32_b + %t10 =w add %t10, %t14 + %t6 =w xor %t6, %t10 + %rotl32_a =w shl %t6, 12 + %rotl32_b =w shr %t6, 20 + %t6 =w xor %rotl32_a, %rotl32_b + %t2 =w add %t2, %t6 + %t14 =w xor %t14, %t2 + %rotl32_a =w shl %t14, 8 + %rotl32_b =w shr %t14, 24 + %t14 =w xor %rotl32_a, %rotl32_b + %t10 =w add %t10, %t14 + %t6 =w xor %t6, %t10 + %rotl32_a =w shl %t6, 7 + %rotl32_b =w shr %t6, 25 + %t6 =w xor %rotl32_a, %rotl32_b + %t3 =w add %t3, %t7 + %t15 =w xor %t15, %t3 + %rotl32_a =w shl %t15, 16 + %rotl32_b =w shr %t15, 16 + %t15 =w xor %rotl32_a, %rotl32_b + %t11 =w add %t11, %t15 + %t7 =w xor %t7, %t11 + %rotl32_a =w shl %t7, 12 + %rotl32_b =w shr %t7, 20 + %t7 =w xor %rotl32_a, %rotl32_b + %t3 =w add %t3, %t7 + %t15 =w xor %t15, %t3 + %rotl32_a =w shl %t15, 8 + %rotl32_b =w shr %t15, 24 + %t15 =w xor %rotl32_a, %rotl32_b + %t11 =w add 
%t11, %t15 + %t7 =w xor %t7, %t11 + %rotl32_a =w shl %t7, 7 + %rotl32_b =w shr %t7, 25 + %t7 =w xor %rotl32_a, %rotl32_b + %t0 =w add %t0, %t5 + %t15 =w xor %t15, %t0 + %rotl32_a =w shl %t15, 16 + %rotl32_b =w shr %t15, 16 + %t15 =w xor %rotl32_a, %rotl32_b + %t10 =w add %t10, %t15 + %t5 =w xor %t5, %t10 + %rotl32_a =w shl %t5, 12 + %rotl32_b =w shr %t5, 20 + %t5 =w xor %rotl32_a, %rotl32_b + %t0 =w add %t0, %t5 + %t15 =w xor %t15, %t0 + %rotl32_a =w shl %t15, 8 + %rotl32_b =w shr %t15, 24 + %t15 =w xor %rotl32_a, %rotl32_b + %t10 =w add %t10, %t15 + %t5 =w xor %t5, %t10 + %rotl32_a =w shl %t5, 7 + %rotl32_b =w shr %t5, 25 + %t5 =w xor %rotl32_a, %rotl32_b + %t1 =w add %t1, %t6 + %t12 =w xor %t12, %t1 + %rotl32_a =w shl %t12, 16 + %rotl32_b =w shr %t12, 16 + %t12 =w xor %rotl32_a, %rotl32_b + %t11 =w add %t11, %t12 + %t6 =w xor %t6, %t11 + %rotl32_a =w shl %t6, 12 + %rotl32_b =w shr %t6, 20 + %t6 =w xor %rotl32_a, %rotl32_b + %t1 =w add %t1, %t6 + %t12 =w xor %t12, %t1 + %rotl32_a =w shl %t12, 8 + %rotl32_b =w shr %t12, 24 + %t12 =w xor %rotl32_a, %rotl32_b + %t11 =w add %t11, %t12 + %t6 =w xor %t6, %t11 + %rotl32_a =w shl %t6, 7 + %rotl32_b =w shr %t6, 25 + %t6 =w xor %rotl32_a, %rotl32_b + %t2 =w add %t2, %t7 + %t13 =w xor %t13, %t2 + %rotl32_a =w shl %t13, 16 + %rotl32_b =w shr %t13, 16 + %t13 =w xor %rotl32_a, %rotl32_b + %t8 =w add %t8, %t13 + %t7 =w xor %t7, %t8 + %rotl32_a =w shl %t7, 12 + %rotl32_b =w shr %t7, 20 + %t7 =w xor %rotl32_a, %rotl32_b + %t2 =w add %t2, %t7 + %t13 =w xor %t13, %t2 + %rotl32_a =w shl %t13, 8 + %rotl32_b =w shr %t13, 24 + %t13 =w xor %rotl32_a, %rotl32_b + %t8 =w add %t8, %t13 + %t7 =w xor %t7, %t8 + %rotl32_a =w shl %t7, 7 + %rotl32_b =w shr %t7, 25 + %t7 =w xor %rotl32_a, %rotl32_b + %t3 =w add %t3, %t4 + %t14 =w xor %t14, %t3 + %rotl32_a =w shl %t14, 16 + %rotl32_b =w shr %t14, 16 + %t14 =w xor %rotl32_a, %rotl32_b + %t9 =w add %t9, %t14 + %t4 =w xor %t4, %t9 + %rotl32_a =w shl %t4, 12 + %rotl32_b =w shr %t4, 20 + %t4 =w xor 
%rotl32_a, %rotl32_b + %t3 =w add %t3, %t4 + %t14 =w xor %t14, %t3 + %rotl32_a =w shl %t14, 8 + %rotl32_b =w shr %t14, 24 + %t14 =w xor %rotl32_a, %rotl32_b + %t9 =w add %t9, %t14 + %t4 =w xor %t4, %t9 + %rotl32_a =w shl %t4, 7 + %rotl32_b =w shr %t4, 25 + %t4 =w xor %rotl32_a, %rotl32_b + %counter =w sub %counter, 10 + jnz %counter, @loop, @done +@done + storew %t0, %out + %out =l add %out, 4 + storew %t1, %out + %out =l add %out, 4 + storew %t2, %out + %out =l add %out, 4 + storew %t3, %out + %out =l add %out, 4 + storew %t4, %out + %out =l add %out, 4 + storew %t5, %out + %out =l add %out, 4 + storew %t6, %out + %out =l add %out, 4 + storew %t7, %out + %out =l add %out, 4 + storew %t8, %out + %out =l add %out, 4 + storew %t9, %out + %out =l add %out, 4 + storew %t10, %out + %out =l add %out, 4 + storew %t11, %out + %out =l add %out, 4 + storew %t12, %out + %out =l add %out, 4 + storew %t13, %out + %out =l add %out, 4 + storew %t14, %out + %out =l add %out, 4 + storew %t15, %out + %out =l add %out, 4 + ret +} diff --git a/src/qbe/test/_dragon.ssa b/src/qbe/test/_dragon.ssa new file mode 100644 index 00000000..b169e1ba --- /dev/null +++ b/src/qbe/test/_dragon.ssa @@ -0,0 +1,33 @@ +# a moderately complex test for +# dominators computation from +# the dragon book +# because branching is limited to +# two, I had to split some blocks + +function $dragon() { +@start +@b1 + jnz 0, @b2, @b3 +@b2 + jmp @b3 +@b3 + jmp @b4.1 +@b4.1 + jnz 0, @b3, @b4.2 +@b4.2 + jnz 0, @b5, @b6 +@b5 + jmp @b7 +@b6 + jmp @b7 +@b7 + jnz 0, @b8.1, @b4.1 +@b8.1 + jnz 0, @b3, @b8.2 +@b8.2 + jnz 0, @b9, @b10 +@b9 + jmp @b1 +@b10 + jmp @b7 +} diff --git a/src/qbe/test/_fix1.ssa b/src/qbe/test/_fix1.ssa new file mode 100644 index 00000000..e89307fb --- /dev/null +++ b/src/qbe/test/_fix1.ssa @@ -0,0 +1,15 @@ +function $test() { +@start + %x =w copy 1 +@loop + jnz %x, @noz, @isz +@noz + %x =w copy 0 + jmp @end +@isz + %x =w copy 1 + jmp @loop +@end + %z =w add 10, %x + ret +} diff --git 
a/src/qbe/test/_fix2.ssa b/src/qbe/test/_fix2.ssa new file mode 100644 index 00000000..89f236d2 --- /dev/null +++ b/src/qbe/test/_fix2.ssa @@ -0,0 +1,15 @@ +function $test() { +@start + %x =w copy 1 +@loop + jnz %x, @noz, @isz +@noz + %x =w copy 0 + jnz %x, @loop, @end +@isz + %x =w copy 1 + jmp @loop +@end + %z =w add 10, %x + ret +} diff --git a/src/qbe/test/_fix3.ssa b/src/qbe/test/_fix3.ssa new file mode 100644 index 00000000..283e5a1c --- /dev/null +++ b/src/qbe/test/_fix3.ssa @@ -0,0 +1,20 @@ +function w $test() { +@start + %x =w copy 100 + %s =w copy 0 +@l + %c =w cslew %x, 10 + jnz %c, @a, @b +@a + %s =w add %s, %x + %x =w sub %x, 1 + jmp @c +@b + %s =w sub %s, %x + jmp @c +@c + %x =w sub %x, 1 + jnz %x, @l, @end +@end + ret %s +} diff --git a/src/qbe/test/_fix4.ssa b/src/qbe/test/_fix4.ssa new file mode 100644 index 00000000..181768dd --- /dev/null +++ b/src/qbe/test/_fix4.ssa @@ -0,0 +1,27 @@ +function $test() { +@start + %x =w copy 3 + %n =w copy 2 +@loop + %c =w ceqw %n, 10000 + jnz %c, @end, @next +@next + %t =w copy 3 + %x =w add %x, 2 +@tloop + %s =w mul %t, %t + %c =w csgtw %s, %x + jnz %c, @prime, @test +@test + %r =w rem %x, %t + jnz %r, @tnext, @loop +@tnext + %t =w add %t, 2 + jmp @tloop +@prime + %n =w add %n, 1 + jmp @loop +@end + storew %x, $a + ret +} diff --git a/src/qbe/test/_gcm1.ssa b/src/qbe/test/_gcm1.ssa new file mode 100644 index 00000000..719cddb7 --- /dev/null +++ b/src/qbe/test/_gcm1.ssa @@ -0,0 +1,48 @@ +export +function w $ifmv(w %p1, w %p2, w %p3) { +@start +@entry + %rt =w add %p2, %p3 # gcm moves to @true + %rf =w sub %p2, %p3 # gcm moves to @false + jnz %p1, @true, @false +@true + %r =w copy %rt + jmp @exit +@false + %r =w copy %rf + jmp @exit +@exit + ret %r +} + +export +function w $hoist1(w %p1, w %p2, w %p3) { +@start +@entry + %n =w copy 0 + %i =w copy %p1 +@loop + %base =w add %p2, %p3 # gcm moves to @exit + %i =w sub %i, 1 + %n =w add %n, 1 + jnz %i, @loop, @exit +@exit + %r =w add %base, %n + ret %r +} + +export 
+function w $hoist2(w %p1, w %p2, w %p3) { +@start +@entry + %n =w copy 0 + %i =w copy %p1 +@loop + %base =w add %p2, %p3 # gcm moves to @entry + %i =w sub %i, 1 + %n =w add %n, %base + jnz %i, @loop, @exit +@exit + %r =w add %base, %n + ret %r +} diff --git a/src/qbe/test/_gcm2.ssa b/src/qbe/test/_gcm2.ssa new file mode 100644 index 00000000..baeb4a4f --- /dev/null +++ b/src/qbe/test/_gcm2.ssa @@ -0,0 +1,43 @@ +# Programs from "Global Code Motion Global Value Numbering" by Cliff Click +# https://courses.cs.washington.edu/courses/cse501/06wi/reading/click-pldi95.pdf + +# GCM program in Figure 1 + +function w $gcm_test(w %a){ +@start + %i.0 =w copy 0 +@loop + %i.1 =w phi @start %i.0, @loop %i.2 + %b =w add %a, 1 # early schedule moves to @start + %i.2 =w add %i.1, %b + %c =w mul %i.2, 2 # late schedule moves to @end + %x =w csltw %i.2, 10 + jnz %x, @loop, @end +@end + ret %c +} + +# GCM program in "Figure 3 x's definition does not dominate it's use" +# +# SSA contruction will insert phi instruction for "x" in @if_false +# preventing the "add" in @if_false from being moved to @if_true + +function $gcm_test2 (w %a){ +@start + %f =w copy 1 + %x =w copy 0 + %s.0 =w copy 0 +@loop + %s.1 = w phi @start %s.0, @if_false %s.2 + jnz %a, @if, @end +@if + jnz %f, @if_true, @if_false +@if_true + %f =w copy 0 + %x =w add %x, 1 +@if_false + %s.2 =w add %s.1, %x + jmp @loop +@end + ret +} diff --git a/src/qbe/test/_live.ssa b/src/qbe/test/_live.ssa new file mode 100644 index 00000000..fce4cb9a --- /dev/null +++ b/src/qbe/test/_live.ssa @@ -0,0 +1,21 @@ +# this control flow graph is irreducible +# yet, we expecet the liveness analysis +# to work properly and make %x live in +# the block @left +# +# nothing should ever be live at the entry + +function $test() { +@start + %b =w copy 0 + %x =w copy 10 + jnz 0, @loop, @left +@left + jmp @inloop +@loop + %x1 =w add %x, 1 +@inloop + %b1 =w add %b, 1 +@endloop + jmp @loop +} diff --git a/src/qbe/test/_load-elim.ssa 
b/src/qbe/test/_load-elim.ssa new file mode 100644 index 00000000..faae4786 --- /dev/null +++ b/src/qbe/test/_load-elim.ssa @@ -0,0 +1,17 @@ +# GCM can eliminate unused add/load instructions + +export +function w $f(l %p, w %c) { +@start + jnz %c, @true, @false +@true + %p1 =l add %p, 4 + %v1 =w loaduw %p1 + jmp @end +@false + %p2 =l add %p, 4 + %v2 =w loaduw %p2 + jmp @end +@end + ret 0 +} diff --git a/src/qbe/test/_rpo.ssa b/src/qbe/test/_rpo.ssa new file mode 100644 index 00000000..a10c6b1a --- /dev/null +++ b/src/qbe/test/_rpo.ssa @@ -0,0 +1,12 @@ +function $test() { +@start + jmp @foo +@baz + jnz 1, @end, @foo +@bar + jmp @end +@foo + jnz 0, @bar, @baz +@end + ret +} diff --git a/src/qbe/test/_slow.qbe b/src/qbe/test/_slow.qbe new file mode 100644 index 00000000..a411e419 --- /dev/null +++ b/src/qbe/test/_slow.qbe @@ -0,0 +1,35762 @@ +function w $safe_unary_minus_func_int8_t_s(w %.1) { +@start.1 + %.2 =l alloc4 1 + storeb %.1, %.2 +@body.2 + %.3 =w loadsb %.2 + %.4 =w extsb %.3 + %.5 =w sub 0, 128 + %.6 =w ceqw %.4, %.5 + %.7 =w cnew %.6, 0 + jnz %.7, @cond_true.3, @cond_false.4 +@cond_true.3 + %.8 =w loadsb %.2 + %.9 =w extsb %.8 + jmp @cond_join.5 +@cond_false.4 + %.10 =w loadsb %.2 + %.11 =w extsb %.10 + %.12 =w sub 0, %.11 +@cond_join.5 + %.13 =w phi @cond_true.3 %.9, @cond_false.4 %.12 + %.14 =w copy %.13 + ret %.14 +} +function w $safe_add_func_int8_t_s_s(w %.1, w %.3) { +@start.6 + %.2 =l alloc4 1 + storeb %.1, %.2 + %.4 =l alloc4 1 + storeb %.3, %.4 +@body.7 + %.5 =w loadsb %.2 + %.6 =w extsb %.5 + %.7 =w csgtw %.6, 0 + %.8 =w cnew %.7, 0 + jnz %.8, @logic_right.15, @logic_join.16 +@logic_right.15 + %.9 =w loadsb %.4 + %.10 =w extsb %.9 + %.11 =w csgtw %.10, 0 + %.12 =w cnew %.11, 0 +@logic_join.16 + %.13 =w phi @body.7 %.8, @logic_right.15 %.12 + %.14 =w cnew %.13, 0 + jnz %.14, @logic_right.13, @logic_join.14 +@logic_right.13 + %.15 =w loadsb %.2 + %.16 =w extsb %.15 + %.17 =w loadsb %.4 + %.18 =w extsb %.17 + %.19 =w sub 127, %.18 + %.20 =w csgtw 
%.16, %.19 + %.21 =w cnew %.20, 0 +@logic_join.14 + %.22 =w phi @logic_join.16 %.14, @logic_right.13 %.21 + %.23 =w cnew %.22, 0 + jnz %.23, @logic_join.12, @logic_right.11 +@logic_right.11 + %.24 =w loadsb %.2 + %.25 =w extsb %.24 + %.26 =w csltw %.25, 0 + %.27 =w cnew %.26, 0 + jnz %.27, @logic_right.19, @logic_join.20 +@logic_right.19 + %.28 =w loadsb %.4 + %.29 =w extsb %.28 + %.30 =w csltw %.29, 0 + %.31 =w cnew %.30, 0 +@logic_join.20 + %.32 =w phi @logic_right.11 %.27, @logic_right.19 %.31 + %.33 =w cnew %.32, 0 + jnz %.33, @logic_right.17, @logic_join.18 +@logic_right.17 + %.34 =w loadsb %.2 + %.35 =w extsb %.34 + %.36 =w sub 0, 128 + %.37 =w loadsb %.4 + %.38 =w extsb %.37 + %.39 =w sub %.36, %.38 + %.40 =w csltw %.35, %.39 + %.41 =w cnew %.40, 0 +@logic_join.18 + %.42 =w phi @logic_join.20 %.33, @logic_right.17 %.41 + %.43 =w cnew %.42, 0 +@logic_join.12 + %.44 =w phi @logic_join.14 %.23, @logic_join.18 %.43 + %.45 =w cnew %.44, 0 + jnz %.45, @cond_true.8, @cond_false.9 +@cond_true.8 + %.46 =w loadsb %.2 + jmp @cond_join.10 +@cond_false.9 + %.47 =w loadsb %.2 + %.48 =w loadsb %.4 + %.49 =w add %.47, %.48 +@cond_join.10 + %.50 =w phi @cond_true.8 %.46, @cond_false.9 %.49 + ret %.50 +} +function w $safe_sub_func_int8_t_s_s(w %.1, w %.3) { +@start.21 + %.2 =l alloc4 1 + storeb %.1, %.2 + %.4 =l alloc4 1 + storeb %.3, %.4 +@body.22 + %.5 =w loadsb %.2 + %.6 =w loadsb %.4 + %.7 =w xor %.5, %.6 + %.8 =w extsb %.7 + %.9 =w loadsb %.2 + %.10 =w extsb %.9 + %.11 =w loadsb %.2 + %.12 =w loadsb %.4 + %.13 =w xor %.11, %.12 + %.14 =w extsb %.13 + %.15 =w xor 127, 18446744073709551615 + %.16 =w and %.14, %.15 + %.17 =w xor %.10, %.16 + %.18 =w loadsb %.4 + %.19 =w extsb %.18 + %.20 =w sub %.17, %.19 + %.21 =w loadsb %.4 + %.22 =w extsb %.21 + %.23 =w xor %.20, %.22 + %.24 =w and %.8, %.23 + %.25 =w csltw %.24, 0 + %.26 =w cnew %.25, 0 + jnz %.26, @cond_true.23, @cond_false.24 +@cond_true.23 + %.27 =w loadsb %.2 + jmp @cond_join.25 +@cond_false.24 + %.28 =w loadsb %.2 
+ %.29 =w loadsb %.4 + %.30 =w sub %.28, %.29 +@cond_join.25 + %.31 =w phi @cond_true.23 %.27, @cond_false.24 %.30 + ret %.31 +} +function w $safe_mul_func_int8_t_s_s(w %.1, w %.3) { +@start.26 + %.2 =l alloc4 1 + storeb %.1, %.2 + %.4 =l alloc4 1 + storeb %.3, %.4 +@body.27 + %.5 =w loadsb %.2 + %.6 =w extsb %.5 + %.7 =w csgtw %.6, 0 + %.8 =w cnew %.7, 0 + jnz %.8, @logic_right.39, @logic_join.40 +@logic_right.39 + %.9 =w loadsb %.4 + %.10 =w extsb %.9 + %.11 =w csgtw %.10, 0 + %.12 =w cnew %.11, 0 +@logic_join.40 + %.13 =w phi @body.27 %.8, @logic_right.39 %.12 + %.14 =w cnew %.13, 0 + jnz %.14, @logic_right.37, @logic_join.38 +@logic_right.37 + %.15 =w loadsb %.2 + %.16 =w extsb %.15 + %.17 =w loadsb %.4 + %.18 =w extsb %.17 + %.19 =w div 127, %.18 + %.20 =w csgtw %.16, %.19 + %.21 =w cnew %.20, 0 +@logic_join.38 + %.22 =w phi @logic_join.40 %.14, @logic_right.37 %.21 + %.23 =w cnew %.22, 0 + jnz %.23, @logic_join.36, @logic_right.35 +@logic_right.35 + %.24 =w loadsb %.2 + %.25 =w extsb %.24 + %.26 =w csgtw %.25, 0 + %.27 =w cnew %.26, 0 + jnz %.27, @logic_right.43, @logic_join.44 +@logic_right.43 + %.28 =w loadsb %.4 + %.29 =w extsb %.28 + %.30 =w cslew %.29, 0 + %.31 =w cnew %.30, 0 +@logic_join.44 + %.32 =w phi @logic_right.35 %.27, @logic_right.43 %.31 + %.33 =w cnew %.32, 0 + jnz %.33, @logic_right.41, @logic_join.42 +@logic_right.41 + %.34 =w loadsb %.4 + %.35 =w extsb %.34 + %.36 =w sub 0, 128 + %.37 =w loadsb %.2 + %.38 =w extsb %.37 + %.39 =w div %.36, %.38 + %.40 =w csltw %.35, %.39 + %.41 =w cnew %.40, 0 +@logic_join.42 + %.42 =w phi @logic_join.44 %.33, @logic_right.41 %.41 + %.43 =w cnew %.42, 0 +@logic_join.36 + %.44 =w phi @logic_join.38 %.23, @logic_join.42 %.43 + %.45 =w cnew %.44, 0 + jnz %.45, @logic_join.34, @logic_right.33 +@logic_right.33 + %.46 =w loadsb %.2 + %.47 =w extsb %.46 + %.48 =w cslew %.47, 0 + %.49 =w cnew %.48, 0 + jnz %.49, @logic_right.47, @logic_join.48 +@logic_right.47 + %.50 =w loadsb %.4 + %.51 =w extsb %.50 + %.52 =w 
csgtw %.51, 0 + %.53 =w cnew %.52, 0 +@logic_join.48 + %.54 =w phi @logic_right.33 %.49, @logic_right.47 %.53 + %.55 =w cnew %.54, 0 + jnz %.55, @logic_right.45, @logic_join.46 +@logic_right.45 + %.56 =w loadsb %.2 + %.57 =w extsb %.56 + %.58 =w sub 0, 128 + %.59 =w loadsb %.4 + %.60 =w extsb %.59 + %.61 =w div %.58, %.60 + %.62 =w csltw %.57, %.61 + %.63 =w cnew %.62, 0 +@logic_join.46 + %.64 =w phi @logic_join.48 %.55, @logic_right.45 %.63 + %.65 =w cnew %.64, 0 +@logic_join.34 + %.66 =w phi @logic_join.36 %.45, @logic_join.46 %.65 + %.67 =w cnew %.66, 0 + jnz %.67, @logic_join.32, @logic_right.31 +@logic_right.31 + %.68 =w loadsb %.2 + %.69 =w extsb %.68 + %.70 =w cslew %.69, 0 + %.71 =w cnew %.70, 0 + jnz %.71, @logic_right.53, @logic_join.54 +@logic_right.53 + %.72 =w loadsb %.4 + %.73 =w extsb %.72 + %.74 =w cslew %.73, 0 + %.75 =w cnew %.74, 0 +@logic_join.54 + %.76 =w phi @logic_right.31 %.71, @logic_right.53 %.75 + %.77 =w cnew %.76, 0 + jnz %.77, @logic_right.51, @logic_join.52 +@logic_right.51 + %.78 =w loadsb %.2 + %.79 =w extsb %.78 + %.80 =w cnew %.79, 0 + %.81 =w cnew %.80, 0 +@logic_join.52 + %.82 =w phi @logic_join.54 %.77, @logic_right.51 %.81 + %.83 =w cnew %.82, 0 + jnz %.83, @logic_right.49, @logic_join.50 +@logic_right.49 + %.84 =w loadsb %.4 + %.85 =w extsb %.84 + %.86 =w loadsb %.2 + %.87 =w extsb %.86 + %.88 =w div 127, %.87 + %.89 =w csltw %.85, %.88 + %.90 =w cnew %.89, 0 +@logic_join.50 + %.91 =w phi @logic_join.52 %.83, @logic_right.49 %.90 + %.92 =w cnew %.91, 0 +@logic_join.32 + %.93 =w phi @logic_join.34 %.67, @logic_join.50 %.92 + %.94 =w cnew %.93, 0 + jnz %.94, @cond_true.28, @cond_false.29 +@cond_true.28 + %.95 =w loadsb %.2 + jmp @cond_join.30 +@cond_false.29 + %.96 =w loadsb %.2 + %.97 =w loadsb %.4 + %.98 =w mul %.96, %.97 +@cond_join.30 + %.99 =w phi @cond_true.28 %.95, @cond_false.29 %.98 + ret %.99 +} +function w $safe_mod_func_int8_t_s_s(w %.1, w %.3) { +@start.55 + %.2 =l alloc4 1 + storeb %.1, %.2 + %.4 =l alloc4 1 + 
storeb %.3, %.4 +@body.56 + %.5 =w loadsb %.4 + %.6 =w extsb %.5 + %.7 =w ceqw %.6, 0 + %.8 =w cnew %.7, 0 + jnz %.8, @logic_join.61, @logic_right.60 +@logic_right.60 + %.9 =w loadsb %.2 + %.10 =w extsb %.9 + %.11 =w sub 0, 128 + %.12 =w ceqw %.10, %.11 + %.13 =w cnew %.12, 0 + jnz %.13, @logic_right.62, @logic_join.63 +@logic_right.62 + %.14 =w loadsb %.4 + %.15 =w extsb %.14 + %.16 =w sub 0, 1 + %.17 =w ceqw %.15, %.16 + %.18 =w cnew %.17, 0 +@logic_join.63 + %.19 =w phi @logic_right.60 %.13, @logic_right.62 %.18 + %.20 =w cnew %.19, 0 +@logic_join.61 + %.21 =w phi @body.56 %.8, @logic_join.63 %.20 + %.22 =w cnew %.21, 0 + jnz %.22, @cond_true.57, @cond_false.58 +@cond_true.57 + %.23 =w loadsb %.2 + jmp @cond_join.59 +@cond_false.58 + %.24 =w loadsb %.2 + %.25 =w loadsb %.4 + %.26 =w rem %.24, %.25 +@cond_join.59 + %.27 =w phi @cond_true.57 %.23, @cond_false.58 %.26 + ret %.27 +} +function w $safe_div_func_int8_t_s_s(w %.1, w %.3) { +@start.64 + %.2 =l alloc4 1 + storeb %.1, %.2 + %.4 =l alloc4 1 + storeb %.3, %.4 +@body.65 + %.5 =w loadsb %.4 + %.6 =w extsb %.5 + %.7 =w ceqw %.6, 0 + %.8 =w cnew %.7, 0 + jnz %.8, @logic_join.70, @logic_right.69 +@logic_right.69 + %.9 =w loadsb %.2 + %.10 =w extsb %.9 + %.11 =w sub 0, 128 + %.12 =w ceqw %.10, %.11 + %.13 =w cnew %.12, 0 + jnz %.13, @logic_right.71, @logic_join.72 +@logic_right.71 + %.14 =w loadsb %.4 + %.15 =w extsb %.14 + %.16 =w sub 0, 1 + %.17 =w ceqw %.15, %.16 + %.18 =w cnew %.17, 0 +@logic_join.72 + %.19 =w phi @logic_right.69 %.13, @logic_right.71 %.18 + %.20 =w cnew %.19, 0 +@logic_join.70 + %.21 =w phi @body.65 %.8, @logic_join.72 %.20 + %.22 =w cnew %.21, 0 + jnz %.22, @cond_true.66, @cond_false.67 +@cond_true.66 + %.23 =w loadsb %.2 + jmp @cond_join.68 +@cond_false.67 + %.24 =w loadsb %.2 + %.25 =w loadsb %.4 + %.26 =w div %.24, %.25 +@cond_join.68 + %.27 =w phi @cond_true.66 %.23, @cond_false.67 %.26 + ret %.27 +} +function w $safe_lshift_func_int8_t_s_s(w %.1, w %.3) { +@start.73 + %.2 =l alloc4 1 + 
storeb %.1, %.2 + %.4 =l alloc4 4 + storew %.3, %.4 +@body.74 + %.5 =w loadsb %.2 + %.6 =w extsb %.5 + %.7 =w csltw %.6, 0 + %.8 =w cnew %.7, 0 + jnz %.8, @logic_join.83, @logic_right.82 +@logic_right.82 + %.9 =w loadsw %.4 + %.10 =w copy %.9 + %.11 =w csltw %.10, 0 + %.12 =w cnew %.11, 0 +@logic_join.83 + %.13 =w phi @body.74 %.8, @logic_right.82 %.12 + %.14 =w cnew %.13, 0 + jnz %.14, @logic_join.81, @logic_right.80 +@logic_right.80 + %.15 =w loadsw %.4 + %.16 =w copy %.15 + %.17 =w csgew %.16, 32 + %.18 =w cnew %.17, 0 +@logic_join.81 + %.19 =w phi @logic_join.83 %.14, @logic_right.80 %.18 + %.20 =w cnew %.19, 0 + jnz %.20, @logic_join.79, @logic_right.78 +@logic_right.78 + %.21 =w loadsb %.2 + %.22 =w extsb %.21 + %.23 =w loadsw %.4 + %.24 =w copy %.23 + %.25 =w sar 127, %.24 + %.26 =w csgtw %.22, %.25 + %.27 =w cnew %.26, 0 +@logic_join.79 + %.28 =w phi @logic_join.81 %.20, @logic_right.78 %.27 + %.29 =w cnew %.28, 0 + jnz %.29, @cond_true.75, @cond_false.76 +@cond_true.75 + %.30 =w loadsb %.2 + %.31 =w extsb %.30 + jmp @cond_join.77 +@cond_false.76 + %.32 =w loadsb %.2 + %.33 =w extsb %.32 + %.34 =w loadsw %.4 + %.35 =w copy %.34 + %.36 =w shl %.33, %.35 +@cond_join.77 + %.37 =w phi @cond_true.75 %.31, @cond_false.76 %.36 + %.38 =w copy %.37 + ret %.38 +} +function w $safe_lshift_func_int8_t_s_u(w %.1, w %.3) { +@start.84 + %.2 =l alloc4 1 + storeb %.1, %.2 + %.4 =l alloc4 4 + storew %.3, %.4 +@body.85 + %.5 =w loadsb %.2 + %.6 =w extsb %.5 + %.7 =w csltw %.6, 0 + %.8 =w cnew %.7, 0 + jnz %.8, @logic_join.92, @logic_right.91 +@logic_right.91 + %.9 =w loaduw %.4 + %.10 =w copy %.9 + %.11 =w copy 32 + %.12 =w cugew %.10, %.11 + %.13 =w cnew %.12, 0 +@logic_join.92 + %.14 =w phi @body.85 %.8, @logic_right.91 %.13 + %.15 =w cnew %.14, 0 + jnz %.15, @logic_join.90, @logic_right.89 +@logic_right.89 + %.16 =w loadsb %.2 + %.17 =w extsb %.16 + %.18 =w loaduw %.4 + %.19 =w copy %.18 + %.20 =w sar 127, %.19 + %.21 =w csgtw %.17, %.20 + %.22 =w cnew %.21, 0 
+@logic_join.90 + %.23 =w phi @logic_join.92 %.15, @logic_right.89 %.22 + %.24 =w cnew %.23, 0 + jnz %.24, @cond_true.86, @cond_false.87 +@cond_true.86 + %.25 =w loadsb %.2 + %.26 =w extsb %.25 + jmp @cond_join.88 +@cond_false.87 + %.27 =w loadsb %.2 + %.28 =w extsb %.27 + %.29 =w loaduw %.4 + %.30 =w copy %.29 + %.31 =w shl %.28, %.30 +@cond_join.88 + %.32 =w phi @cond_true.86 %.26, @cond_false.87 %.31 + %.33 =w copy %.32 + ret %.33 +} +function w $safe_rshift_func_int8_t_s_s(w %.1, w %.3) { +@start.93 + %.2 =l alloc4 1 + storeb %.1, %.2 + %.4 =l alloc4 4 + storew %.3, %.4 +@body.94 + %.5 =w loadsb %.2 + %.6 =w extsb %.5 + %.7 =w csltw %.6, 0 + %.8 =w cnew %.7, 0 + jnz %.8, @logic_join.101, @logic_right.100 +@logic_right.100 + %.9 =w loadsw %.4 + %.10 =w copy %.9 + %.11 =w csltw %.10, 0 + %.12 =w cnew %.11, 0 +@logic_join.101 + %.13 =w phi @body.94 %.8, @logic_right.100 %.12 + %.14 =w cnew %.13, 0 + jnz %.14, @logic_join.99, @logic_right.98 +@logic_right.98 + %.15 =w loadsw %.4 + %.16 =w copy %.15 + %.17 =w csgew %.16, 32 + %.18 =w cnew %.17, 0 +@logic_join.99 + %.19 =w phi @logic_join.101 %.14, @logic_right.98 %.18 + %.20 =w cnew %.19, 0 + jnz %.20, @cond_true.95, @cond_false.96 +@cond_true.95 + %.21 =w loadsb %.2 + %.22 =w extsb %.21 + jmp @cond_join.97 +@cond_false.96 + %.23 =w loadsb %.2 + %.24 =w extsb %.23 + %.25 =w loadsw %.4 + %.26 =w copy %.25 + %.27 =w sar %.24, %.26 +@cond_join.97 + %.28 =w phi @cond_true.95 %.22, @cond_false.96 %.27 + %.29 =w copy %.28 + ret %.29 +} +function w $safe_rshift_func_int8_t_s_u(w %.1, w %.3) { +@start.102 + %.2 =l alloc4 1 + storeb %.1, %.2 + %.4 =l alloc4 4 + storew %.3, %.4 +@body.103 + %.5 =w loadsb %.2 + %.6 =w extsb %.5 + %.7 =w csltw %.6, 0 + %.8 =w cnew %.7, 0 + jnz %.8, @logic_join.108, @logic_right.107 +@logic_right.107 + %.9 =w loaduw %.4 + %.10 =w copy %.9 + %.11 =w copy 32 + %.12 =w cugew %.10, %.11 + %.13 =w cnew %.12, 0 +@logic_join.108 + %.14 =w phi @body.103 %.8, @logic_right.107 %.13 + %.15 =w cnew %.14, 0 
+ jnz %.15, @cond_true.104, @cond_false.105 +@cond_true.104 + %.16 =w loadsb %.2 + %.17 =w extsb %.16 + jmp @cond_join.106 +@cond_false.105 + %.18 =w loadsb %.2 + %.19 =w extsb %.18 + %.20 =w loaduw %.4 + %.21 =w copy %.20 + %.22 =w sar %.19, %.21 +@cond_join.106 + %.23 =w phi @cond_true.104 %.17, @cond_false.105 %.22 + %.24 =w copy %.23 + ret %.24 +} +function w $safe_unary_minus_func_int16_t_s(w %.1) { +@start.109 + %.2 =l alloc4 2 + storeh %.1, %.2 +@body.110 + %.3 =w loadsh %.2 + %.4 =w extsh %.3 + %.5 =w sub 0, 32767 + %.6 =w sub %.5, 1 + %.7 =w ceqw %.4, %.6 + %.8 =w cnew %.7, 0 + jnz %.8, @cond_true.111, @cond_false.112 +@cond_true.111 + %.9 =w loadsh %.2 + %.10 =w extsh %.9 + jmp @cond_join.113 +@cond_false.112 + %.11 =w loadsh %.2 + %.12 =w extsh %.11 + %.13 =w sub 0, %.12 +@cond_join.113 + %.14 =w phi @cond_true.111 %.10, @cond_false.112 %.13 + %.15 =w copy %.14 + ret %.15 +} +function w $safe_add_func_int16_t_s_s(w %.1, w %.3) { +@start.114 + %.2 =l alloc4 2 + storeh %.1, %.2 + %.4 =l alloc4 2 + storeh %.3, %.4 +@body.115 + %.5 =w loadsh %.2 + %.6 =w extsh %.5 + %.7 =w csgtw %.6, 0 + %.8 =w cnew %.7, 0 + jnz %.8, @logic_right.123, @logic_join.124 +@logic_right.123 + %.9 =w loadsh %.4 + %.10 =w extsh %.9 + %.11 =w csgtw %.10, 0 + %.12 =w cnew %.11, 0 +@logic_join.124 + %.13 =w phi @body.115 %.8, @logic_right.123 %.12 + %.14 =w cnew %.13, 0 + jnz %.14, @logic_right.121, @logic_join.122 +@logic_right.121 + %.15 =w loadsh %.2 + %.16 =w extsh %.15 + %.17 =w loadsh %.4 + %.18 =w extsh %.17 + %.19 =w sub 32767, %.18 + %.20 =w csgtw %.16, %.19 + %.21 =w cnew %.20, 0 +@logic_join.122 + %.22 =w phi @logic_join.124 %.14, @logic_right.121 %.21 + %.23 =w cnew %.22, 0 + jnz %.23, @logic_join.120, @logic_right.119 +@logic_right.119 + %.24 =w loadsh %.2 + %.25 =w extsh %.24 + %.26 =w csltw %.25, 0 + %.27 =w cnew %.26, 0 + jnz %.27, @logic_right.127, @logic_join.128 +@logic_right.127 + %.28 =w loadsh %.4 + %.29 =w extsh %.28 + %.30 =w csltw %.29, 0 + %.31 =w cnew %.30, 0 
+@logic_join.128 + %.32 =w phi @logic_right.119 %.27, @logic_right.127 %.31 + %.33 =w cnew %.32, 0 + jnz %.33, @logic_right.125, @logic_join.126 +@logic_right.125 + %.34 =w loadsh %.2 + %.35 =w extsh %.34 + %.36 =w sub 0, 32767 + %.37 =w sub %.36, 1 + %.38 =w loadsh %.4 + %.39 =w extsh %.38 + %.40 =w sub %.37, %.39 + %.41 =w csltw %.35, %.40 + %.42 =w cnew %.41, 0 +@logic_join.126 + %.43 =w phi @logic_join.128 %.33, @logic_right.125 %.42 + %.44 =w cnew %.43, 0 +@logic_join.120 + %.45 =w phi @logic_join.122 %.23, @logic_join.126 %.44 + %.46 =w cnew %.45, 0 + jnz %.46, @cond_true.116, @cond_false.117 +@cond_true.116 + %.47 =w loadsh %.2 + jmp @cond_join.118 +@cond_false.117 + %.48 =w loadsh %.2 + %.49 =w loadsh %.4 + %.50 =w add %.48, %.49 +@cond_join.118 + %.51 =w phi @cond_true.116 %.47, @cond_false.117 %.50 + ret %.51 +} +function w $safe_sub_func_int16_t_s_s(w %.1, w %.3) { +@start.129 + %.2 =l alloc4 2 + storeh %.1, %.2 + %.4 =l alloc4 2 + storeh %.3, %.4 +@body.130 + %.5 =w loadsh %.2 + %.6 =w loadsh %.4 + %.7 =w xor %.5, %.6 + %.8 =w extsh %.7 + %.9 =w loadsh %.2 + %.10 =w extsh %.9 + %.11 =w loadsh %.2 + %.12 =w loadsh %.4 + %.13 =w xor %.11, %.12 + %.14 =w extsh %.13 + %.15 =w xor 32767, 18446744073709551615 + %.16 =w and %.14, %.15 + %.17 =w xor %.10, %.16 + %.18 =w loadsh %.4 + %.19 =w extsh %.18 + %.20 =w sub %.17, %.19 + %.21 =w loadsh %.4 + %.22 =w extsh %.21 + %.23 =w xor %.20, %.22 + %.24 =w and %.8, %.23 + %.25 =w csltw %.24, 0 + %.26 =w cnew %.25, 0 + jnz %.26, @cond_true.131, @cond_false.132 +@cond_true.131 + %.27 =w loadsh %.2 + jmp @cond_join.133 +@cond_false.132 + %.28 =w loadsh %.2 + %.29 =w loadsh %.4 + %.30 =w sub %.28, %.29 +@cond_join.133 + %.31 =w phi @cond_true.131 %.27, @cond_false.132 %.30 + ret %.31 +} +function w $safe_mul_func_int16_t_s_s(w %.1, w %.3) { +@start.134 + %.2 =l alloc4 2 + storeh %.1, %.2 + %.4 =l alloc4 2 + storeh %.3, %.4 +@body.135 + %.5 =w loadsh %.2 + %.6 =w extsh %.5 + %.7 =w csgtw %.6, 0 + %.8 =w cnew %.7, 0 + jnz 
%.8, @logic_right.147, @logic_join.148 +@logic_right.147 + %.9 =w loadsh %.4 + %.10 =w extsh %.9 + %.11 =w csgtw %.10, 0 + %.12 =w cnew %.11, 0 +@logic_join.148 + %.13 =w phi @body.135 %.8, @logic_right.147 %.12 + %.14 =w cnew %.13, 0 + jnz %.14, @logic_right.145, @logic_join.146 +@logic_right.145 + %.15 =w loadsh %.2 + %.16 =w extsh %.15 + %.17 =w loadsh %.4 + %.18 =w extsh %.17 + %.19 =w div 32767, %.18 + %.20 =w csgtw %.16, %.19 + %.21 =w cnew %.20, 0 +@logic_join.146 + %.22 =w phi @logic_join.148 %.14, @logic_right.145 %.21 + %.23 =w cnew %.22, 0 + jnz %.23, @logic_join.144, @logic_right.143 +@logic_right.143 + %.24 =w loadsh %.2 + %.25 =w extsh %.24 + %.26 =w csgtw %.25, 0 + %.27 =w cnew %.26, 0 + jnz %.27, @logic_right.151, @logic_join.152 +@logic_right.151 + %.28 =w loadsh %.4 + %.29 =w extsh %.28 + %.30 =w cslew %.29, 0 + %.31 =w cnew %.30, 0 +@logic_join.152 + %.32 =w phi @logic_right.143 %.27, @logic_right.151 %.31 + %.33 =w cnew %.32, 0 + jnz %.33, @logic_right.149, @logic_join.150 +@logic_right.149 + %.34 =w loadsh %.4 + %.35 =w extsh %.34 + %.36 =w sub 0, 32767 + %.37 =w sub %.36, 1 + %.38 =w loadsh %.2 + %.39 =w extsh %.38 + %.40 =w div %.37, %.39 + %.41 =w csltw %.35, %.40 + %.42 =w cnew %.41, 0 +@logic_join.150 + %.43 =w phi @logic_join.152 %.33, @logic_right.149 %.42 + %.44 =w cnew %.43, 0 +@logic_join.144 + %.45 =w phi @logic_join.146 %.23, @logic_join.150 %.44 + %.46 =w cnew %.45, 0 + jnz %.46, @logic_join.142, @logic_right.141 +@logic_right.141 + %.47 =w loadsh %.2 + %.48 =w extsh %.47 + %.49 =w cslew %.48, 0 + %.50 =w cnew %.49, 0 + jnz %.50, @logic_right.155, @logic_join.156 +@logic_right.155 + %.51 =w loadsh %.4 + %.52 =w extsh %.51 + %.53 =w csgtw %.52, 0 + %.54 =w cnew %.53, 0 +@logic_join.156 + %.55 =w phi @logic_right.141 %.50, @logic_right.155 %.54 + %.56 =w cnew %.55, 0 + jnz %.56, @logic_right.153, @logic_join.154 +@logic_right.153 + %.57 =w loadsh %.2 + %.58 =w extsh %.57 + %.59 =w sub 0, 32767 + %.60 =w sub %.59, 1 + %.61 =w loadsh 
%.4 + %.62 =w extsh %.61 + %.63 =w div %.60, %.62 + %.64 =w csltw %.58, %.63 + %.65 =w cnew %.64, 0 +@logic_join.154 + %.66 =w phi @logic_join.156 %.56, @logic_right.153 %.65 + %.67 =w cnew %.66, 0 +@logic_join.142 + %.68 =w phi @logic_join.144 %.46, @logic_join.154 %.67 + %.69 =w cnew %.68, 0 + jnz %.69, @logic_join.140, @logic_right.139 +@logic_right.139 + %.70 =w loadsh %.2 + %.71 =w extsh %.70 + %.72 =w cslew %.71, 0 + %.73 =w cnew %.72, 0 + jnz %.73, @logic_right.161, @logic_join.162 +@logic_right.161 + %.74 =w loadsh %.4 + %.75 =w extsh %.74 + %.76 =w cslew %.75, 0 + %.77 =w cnew %.76, 0 +@logic_join.162 + %.78 =w phi @logic_right.139 %.73, @logic_right.161 %.77 + %.79 =w cnew %.78, 0 + jnz %.79, @logic_right.159, @logic_join.160 +@logic_right.159 + %.80 =w loadsh %.2 + %.81 =w extsh %.80 + %.82 =w cnew %.81, 0 + %.83 =w cnew %.82, 0 +@logic_join.160 + %.84 =w phi @logic_join.162 %.79, @logic_right.159 %.83 + %.85 =w cnew %.84, 0 + jnz %.85, @logic_right.157, @logic_join.158 +@logic_right.157 + %.86 =w loadsh %.4 + %.87 =w extsh %.86 + %.88 =w loadsh %.2 + %.89 =w extsh %.88 + %.90 =w div 32767, %.89 + %.91 =w csltw %.87, %.90 + %.92 =w cnew %.91, 0 +@logic_join.158 + %.93 =w phi @logic_join.160 %.85, @logic_right.157 %.92 + %.94 =w cnew %.93, 0 +@logic_join.140 + %.95 =w phi @logic_join.142 %.69, @logic_join.158 %.94 + %.96 =w cnew %.95, 0 + jnz %.96, @cond_true.136, @cond_false.137 +@cond_true.136 + %.97 =w loadsh %.2 + jmp @cond_join.138 +@cond_false.137 + %.98 =w loadsh %.2 + %.99 =w loadsh %.4 + %.100 =w mul %.98, %.99 +@cond_join.138 + %.101 =w phi @cond_true.136 %.97, @cond_false.137 %.100 + ret %.101 +} +function w $safe_mod_func_int16_t_s_s(w %.1, w %.3) { +@start.163 + %.2 =l alloc4 2 + storeh %.1, %.2 + %.4 =l alloc4 2 + storeh %.3, %.4 +@body.164 + %.5 =w loadsh %.4 + %.6 =w extsh %.5 + %.7 =w ceqw %.6, 0 + %.8 =w cnew %.7, 0 + jnz %.8, @logic_join.169, @logic_right.168 +@logic_right.168 + %.9 =w loadsh %.2 + %.10 =w extsh %.9 + %.11 =w sub 0, 
32767 + %.12 =w sub %.11, 1 + %.13 =w ceqw %.10, %.12 + %.14 =w cnew %.13, 0 + jnz %.14, @logic_right.170, @logic_join.171 +@logic_right.170 + %.15 =w loadsh %.4 + %.16 =w extsh %.15 + %.17 =w sub 0, 1 + %.18 =w ceqw %.16, %.17 + %.19 =w cnew %.18, 0 +@logic_join.171 + %.20 =w phi @logic_right.168 %.14, @logic_right.170 %.19 + %.21 =w cnew %.20, 0 +@logic_join.169 + %.22 =w phi @body.164 %.8, @logic_join.171 %.21 + %.23 =w cnew %.22, 0 + jnz %.23, @cond_true.165, @cond_false.166 +@cond_true.165 + %.24 =w loadsh %.2 + jmp @cond_join.167 +@cond_false.166 + %.25 =w loadsh %.2 + %.26 =w loadsh %.4 + %.27 =w rem %.25, %.26 +@cond_join.167 + %.28 =w phi @cond_true.165 %.24, @cond_false.166 %.27 + ret %.28 +} +function w $safe_div_func_int16_t_s_s(w %.1, w %.3) { +@start.172 + %.2 =l alloc4 2 + storeh %.1, %.2 + %.4 =l alloc4 2 + storeh %.3, %.4 +@body.173 + %.5 =w loadsh %.4 + %.6 =w extsh %.5 + %.7 =w ceqw %.6, 0 + %.8 =w cnew %.7, 0 + jnz %.8, @logic_join.178, @logic_right.177 +@logic_right.177 + %.9 =w loadsh %.2 + %.10 =w extsh %.9 + %.11 =w sub 0, 32767 + %.12 =w sub %.11, 1 + %.13 =w ceqw %.10, %.12 + %.14 =w cnew %.13, 0 + jnz %.14, @logic_right.179, @logic_join.180 +@logic_right.179 + %.15 =w loadsh %.4 + %.16 =w extsh %.15 + %.17 =w sub 0, 1 + %.18 =w ceqw %.16, %.17 + %.19 =w cnew %.18, 0 +@logic_join.180 + %.20 =w phi @logic_right.177 %.14, @logic_right.179 %.19 + %.21 =w cnew %.20, 0 +@logic_join.178 + %.22 =w phi @body.173 %.8, @logic_join.180 %.21 + %.23 =w cnew %.22, 0 + jnz %.23, @cond_true.174, @cond_false.175 +@cond_true.174 + %.24 =w loadsh %.2 + jmp @cond_join.176 +@cond_false.175 + %.25 =w loadsh %.2 + %.26 =w loadsh %.4 + %.27 =w div %.25, %.26 +@cond_join.176 + %.28 =w phi @cond_true.174 %.24, @cond_false.175 %.27 + ret %.28 +} +function w $safe_lshift_func_int16_t_s_s(w %.1, w %.3) { +@start.181 + %.2 =l alloc4 2 + storeh %.1, %.2 + %.4 =l alloc4 4 + storew %.3, %.4 +@body.182 + %.5 =w loadsh %.2 + %.6 =w extsh %.5 + %.7 =w csltw %.6, 0 + %.8 =w 
cnew %.7, 0 + jnz %.8, @logic_join.191, @logic_right.190 +@logic_right.190 + %.9 =w loadsw %.4 + %.10 =w copy %.9 + %.11 =w csltw %.10, 0 + %.12 =w cnew %.11, 0 +@logic_join.191 + %.13 =w phi @body.182 %.8, @logic_right.190 %.12 + %.14 =w cnew %.13, 0 + jnz %.14, @logic_join.189, @logic_right.188 +@logic_right.188 + %.15 =w loadsw %.4 + %.16 =w copy %.15 + %.17 =w csgew %.16, 32 + %.18 =w cnew %.17, 0 +@logic_join.189 + %.19 =w phi @logic_join.191 %.14, @logic_right.188 %.18 + %.20 =w cnew %.19, 0 + jnz %.20, @logic_join.187, @logic_right.186 +@logic_right.186 + %.21 =w loadsh %.2 + %.22 =w extsh %.21 + %.23 =w loadsw %.4 + %.24 =w copy %.23 + %.25 =w sar 32767, %.24 + %.26 =w csgtw %.22, %.25 + %.27 =w cnew %.26, 0 +@logic_join.187 + %.28 =w phi @logic_join.189 %.20, @logic_right.186 %.27 + %.29 =w cnew %.28, 0 + jnz %.29, @cond_true.183, @cond_false.184 +@cond_true.183 + %.30 =w loadsh %.2 + %.31 =w extsh %.30 + jmp @cond_join.185 +@cond_false.184 + %.32 =w loadsh %.2 + %.33 =w extsh %.32 + %.34 =w loadsw %.4 + %.35 =w copy %.34 + %.36 =w shl %.33, %.35 +@cond_join.185 + %.37 =w phi @cond_true.183 %.31, @cond_false.184 %.36 + %.38 =w copy %.37 + ret %.38 +} +function w $safe_lshift_func_int16_t_s_u(w %.1, w %.3) { +@start.192 + %.2 =l alloc4 2 + storeh %.1, %.2 + %.4 =l alloc4 4 + storew %.3, %.4 +@body.193 + %.5 =w loadsh %.2 + %.6 =w extsh %.5 + %.7 =w csltw %.6, 0 + %.8 =w cnew %.7, 0 + jnz %.8, @logic_join.200, @logic_right.199 +@logic_right.199 + %.9 =w loaduw %.4 + %.10 =w copy %.9 + %.11 =w copy 32 + %.12 =w cugew %.10, %.11 + %.13 =w cnew %.12, 0 +@logic_join.200 + %.14 =w phi @body.193 %.8, @logic_right.199 %.13 + %.15 =w cnew %.14, 0 + jnz %.15, @logic_join.198, @logic_right.197 +@logic_right.197 + %.16 =w loadsh %.2 + %.17 =w extsh %.16 + %.18 =w loaduw %.4 + %.19 =w copy %.18 + %.20 =w sar 32767, %.19 + %.21 =w csgtw %.17, %.20 + %.22 =w cnew %.21, 0 +@logic_join.198 + %.23 =w phi @logic_join.200 %.15, @logic_right.197 %.22 + %.24 =w cnew %.23, 0 + 
jnz %.24, @cond_true.194, @cond_false.195 +@cond_true.194 + %.25 =w loadsh %.2 + %.26 =w extsh %.25 + jmp @cond_join.196 +@cond_false.195 + %.27 =w loadsh %.2 + %.28 =w extsh %.27 + %.29 =w loaduw %.4 + %.30 =w copy %.29 + %.31 =w shl %.28, %.30 +@cond_join.196 + %.32 =w phi @cond_true.194 %.26, @cond_false.195 %.31 + %.33 =w copy %.32 + ret %.33 +} +function w $safe_rshift_func_int16_t_s_s(w %.1, w %.3) { +@start.201 + %.2 =l alloc4 2 + storeh %.1, %.2 + %.4 =l alloc4 4 + storew %.3, %.4 +@body.202 + %.5 =w loadsh %.2 + %.6 =w extsh %.5 + %.7 =w csltw %.6, 0 + %.8 =w cnew %.7, 0 + jnz %.8, @logic_join.209, @logic_right.208 +@logic_right.208 + %.9 =w loadsw %.4 + %.10 =w copy %.9 + %.11 =w csltw %.10, 0 + %.12 =w cnew %.11, 0 +@logic_join.209 + %.13 =w phi @body.202 %.8, @logic_right.208 %.12 + %.14 =w cnew %.13, 0 + jnz %.14, @logic_join.207, @logic_right.206 +@logic_right.206 + %.15 =w loadsw %.4 + %.16 =w copy %.15 + %.17 =w csgew %.16, 32 + %.18 =w cnew %.17, 0 +@logic_join.207 + %.19 =w phi @logic_join.209 %.14, @logic_right.206 %.18 + %.20 =w cnew %.19, 0 + jnz %.20, @cond_true.203, @cond_false.204 +@cond_true.203 + %.21 =w loadsh %.2 + %.22 =w extsh %.21 + jmp @cond_join.205 +@cond_false.204 + %.23 =w loadsh %.2 + %.24 =w extsh %.23 + %.25 =w loadsw %.4 + %.26 =w copy %.25 + %.27 =w sar %.24, %.26 +@cond_join.205 + %.28 =w phi @cond_true.203 %.22, @cond_false.204 %.27 + %.29 =w copy %.28 + ret %.29 +} +function w $safe_rshift_func_int16_t_s_u(w %.1, w %.3) { +@start.210 + %.2 =l alloc4 2 + storeh %.1, %.2 + %.4 =l alloc4 4 + storew %.3, %.4 +@body.211 + %.5 =w loadsh %.2 + %.6 =w extsh %.5 + %.7 =w csltw %.6, 0 + %.8 =w cnew %.7, 0 + jnz %.8, @logic_join.216, @logic_right.215 +@logic_right.215 + %.9 =w loaduw %.4 + %.10 =w copy %.9 + %.11 =w copy 32 + %.12 =w cugew %.10, %.11 + %.13 =w cnew %.12, 0 +@logic_join.216 + %.14 =w phi @body.211 %.8, @logic_right.215 %.13 + %.15 =w cnew %.14, 0 + jnz %.15, @cond_true.212, @cond_false.213 +@cond_true.212 + %.16 =w 
loadsh %.2 + %.17 =w extsh %.16 + jmp @cond_join.214 +@cond_false.213 + %.18 =w loadsh %.2 + %.19 =w extsh %.18 + %.20 =w loaduw %.4 + %.21 =w copy %.20 + %.22 =w sar %.19, %.21 +@cond_join.214 + %.23 =w phi @cond_true.212 %.17, @cond_false.213 %.22 + %.24 =w copy %.23 + ret %.24 +} +function w $safe_unary_minus_func_int32_t_s(w %.1) { +@start.217 + %.2 =l alloc4 4 + storew %.1, %.2 +@body.218 + %.3 =w loadsw %.2 + %.4 =w sub 0, 2147483647 + %.5 =w sub %.4, 1 + %.6 =w ceqw %.3, %.5 + %.7 =w cnew %.6, 0 + jnz %.7, @cond_true.219, @cond_false.220 +@cond_true.219 + %.8 =w loadsw %.2 + jmp @cond_join.221 +@cond_false.220 + %.9 =w loadsw %.2 + %.10 =w sub 0, %.9 +@cond_join.221 + %.11 =w phi @cond_true.219 %.8, @cond_false.220 %.10 + ret %.11 +} +function w $safe_add_func_int32_t_s_s(w %.1, w %.3) { +@start.222 + %.2 =l alloc4 4 + storew %.1, %.2 + %.4 =l alloc4 4 + storew %.3, %.4 +@body.223 + %.5 =w loadsw %.2 + %.6 =w csgtw %.5, 0 + %.7 =w cnew %.6, 0 + jnz %.7, @logic_right.231, @logic_join.232 +@logic_right.231 + %.8 =w loadsw %.4 + %.9 =w csgtw %.8, 0 + %.10 =w cnew %.9, 0 +@logic_join.232 + %.11 =w phi @body.223 %.7, @logic_right.231 %.10 + %.12 =w cnew %.11, 0 + jnz %.12, @logic_right.229, @logic_join.230 +@logic_right.229 + %.13 =w loadsw %.2 + %.14 =w loadsw %.4 + %.15 =w sub 2147483647, %.14 + %.16 =w csgtw %.13, %.15 + %.17 =w cnew %.16, 0 +@logic_join.230 + %.18 =w phi @logic_join.232 %.12, @logic_right.229 %.17 + %.19 =w cnew %.18, 0 + jnz %.19, @logic_join.228, @logic_right.227 +@logic_right.227 + %.20 =w loadsw %.2 + %.21 =w csltw %.20, 0 + %.22 =w cnew %.21, 0 + jnz %.22, @logic_right.235, @logic_join.236 +@logic_right.235 + %.23 =w loadsw %.4 + %.24 =w csltw %.23, 0 + %.25 =w cnew %.24, 0 +@logic_join.236 + %.26 =w phi @logic_right.227 %.22, @logic_right.235 %.25 + %.27 =w cnew %.26, 0 + jnz %.27, @logic_right.233, @logic_join.234 +@logic_right.233 + %.28 =w loadsw %.2 + %.29 =w sub 0, 2147483647 + %.30 =w sub %.29, 1 + %.31 =w loadsw %.4 + %.32 =w sub 
%.30, %.31 + %.33 =w csltw %.28, %.32 + %.34 =w cnew %.33, 0 +@logic_join.234 + %.35 =w phi @logic_join.236 %.27, @logic_right.233 %.34 + %.36 =w cnew %.35, 0 +@logic_join.228 + %.37 =w phi @logic_join.230 %.19, @logic_join.234 %.36 + %.38 =w cnew %.37, 0 + jnz %.38, @cond_true.224, @cond_false.225 +@cond_true.224 + %.39 =w loadsw %.2 + jmp @cond_join.226 +@cond_false.225 + %.40 =w loadsw %.2 + %.41 =w loadsw %.4 + %.42 =w add %.40, %.41 +@cond_join.226 + %.43 =w phi @cond_true.224 %.39, @cond_false.225 %.42 + ret %.43 +} +function w $safe_sub_func_int32_t_s_s(w %.1, w %.3) { +@start.237 + %.2 =l alloc4 4 + storew %.1, %.2 + %.4 =l alloc4 4 + storew %.3, %.4 +@body.238 + %.5 =w loadsw %.2 + %.6 =w loadsw %.4 + %.7 =w xor %.5, %.6 + %.8 =w loadsw %.2 + %.9 =w loadsw %.2 + %.10 =w loadsw %.4 + %.11 =w xor %.9, %.10 + %.12 =w xor 2147483647, 18446744073709551615 + %.13 =w and %.11, %.12 + %.14 =w xor %.8, %.13 + %.15 =w loadsw %.4 + %.16 =w sub %.14, %.15 + %.17 =w loadsw %.4 + %.18 =w xor %.16, %.17 + %.19 =w and %.7, %.18 + %.20 =w csltw %.19, 0 + %.21 =w cnew %.20, 0 + jnz %.21, @cond_true.239, @cond_false.240 +@cond_true.239 + %.22 =w loadsw %.2 + jmp @cond_join.241 +@cond_false.240 + %.23 =w loadsw %.2 + %.24 =w loadsw %.4 + %.25 =w sub %.23, %.24 +@cond_join.241 + %.26 =w phi @cond_true.239 %.22, @cond_false.240 %.25 + ret %.26 +} +function w $safe_mul_func_int32_t_s_s(w %.1, w %.3) { +@start.242 + %.2 =l alloc4 4 + storew %.1, %.2 + %.4 =l alloc4 4 + storew %.3, %.4 +@body.243 + %.5 =w loadsw %.2 + %.6 =w csgtw %.5, 0 + %.7 =w cnew %.6, 0 + jnz %.7, @logic_right.255, @logic_join.256 +@logic_right.255 + %.8 =w loadsw %.4 + %.9 =w csgtw %.8, 0 + %.10 =w cnew %.9, 0 +@logic_join.256 + %.11 =w phi @body.243 %.7, @logic_right.255 %.10 + %.12 =w cnew %.11, 0 + jnz %.12, @logic_right.253, @logic_join.254 +@logic_right.253 + %.13 =w loadsw %.2 + %.14 =w loadsw %.4 + %.15 =w div 2147483647, %.14 + %.16 =w csgtw %.13, %.15 + %.17 =w cnew %.16, 0 +@logic_join.254 + %.18 
=w phi @logic_join.256 %.12, @logic_right.253 %.17 + %.19 =w cnew %.18, 0 + jnz %.19, @logic_join.252, @logic_right.251 +@logic_right.251 + %.20 =w loadsw %.2 + %.21 =w csgtw %.20, 0 + %.22 =w cnew %.21, 0 + jnz %.22, @logic_right.259, @logic_join.260 +@logic_right.259 + %.23 =w loadsw %.4 + %.24 =w cslew %.23, 0 + %.25 =w cnew %.24, 0 +@logic_join.260 + %.26 =w phi @logic_right.251 %.22, @logic_right.259 %.25 + %.27 =w cnew %.26, 0 + jnz %.27, @logic_right.257, @logic_join.258 +@logic_right.257 + %.28 =w loadsw %.4 + %.29 =w sub 0, 2147483647 + %.30 =w sub %.29, 1 + %.31 =w loadsw %.2 + %.32 =w div %.30, %.31 + %.33 =w csltw %.28, %.32 + %.34 =w cnew %.33, 0 +@logic_join.258 + %.35 =w phi @logic_join.260 %.27, @logic_right.257 %.34 + %.36 =w cnew %.35, 0 +@logic_join.252 + %.37 =w phi @logic_join.254 %.19, @logic_join.258 %.36 + %.38 =w cnew %.37, 0 + jnz %.38, @logic_join.250, @logic_right.249 +@logic_right.249 + %.39 =w loadsw %.2 + %.40 =w cslew %.39, 0 + %.41 =w cnew %.40, 0 + jnz %.41, @logic_right.263, @logic_join.264 +@logic_right.263 + %.42 =w loadsw %.4 + %.43 =w csgtw %.42, 0 + %.44 =w cnew %.43, 0 +@logic_join.264 + %.45 =w phi @logic_right.249 %.41, @logic_right.263 %.44 + %.46 =w cnew %.45, 0 + jnz %.46, @logic_right.261, @logic_join.262 +@logic_right.261 + %.47 =w loadsw %.2 + %.48 =w sub 0, 2147483647 + %.49 =w sub %.48, 1 + %.50 =w loadsw %.4 + %.51 =w div %.49, %.50 + %.52 =w csltw %.47, %.51 + %.53 =w cnew %.52, 0 +@logic_join.262 + %.54 =w phi @logic_join.264 %.46, @logic_right.261 %.53 + %.55 =w cnew %.54, 0 +@logic_join.250 + %.56 =w phi @logic_join.252 %.38, @logic_join.262 %.55 + %.57 =w cnew %.56, 0 + jnz %.57, @logic_join.248, @logic_right.247 +@logic_right.247 + %.58 =w loadsw %.2 + %.59 =w cslew %.58, 0 + %.60 =w cnew %.59, 0 + jnz %.60, @logic_right.269, @logic_join.270 +@logic_right.269 + %.61 =w loadsw %.4 + %.62 =w cslew %.61, 0 + %.63 =w cnew %.62, 0 +@logic_join.270 + %.64 =w phi @logic_right.247 %.60, @logic_right.269 %.63 + %.65 
=w cnew %.64, 0 + jnz %.65, @logic_right.267, @logic_join.268 +@logic_right.267 + %.66 =w loadsw %.2 + %.67 =w cnew %.66, 0 + %.68 =w cnew %.67, 0 +@logic_join.268 + %.69 =w phi @logic_join.270 %.65, @logic_right.267 %.68 + %.70 =w cnew %.69, 0 + jnz %.70, @logic_right.265, @logic_join.266 +@logic_right.265 + %.71 =w loadsw %.4 + %.72 =w loadsw %.2 + %.73 =w div 2147483647, %.72 + %.74 =w csltw %.71, %.73 + %.75 =w cnew %.74, 0 +@logic_join.266 + %.76 =w phi @logic_join.268 %.70, @logic_right.265 %.75 + %.77 =w cnew %.76, 0 +@logic_join.248 + %.78 =w phi @logic_join.250 %.57, @logic_join.266 %.77 + %.79 =w cnew %.78, 0 + jnz %.79, @cond_true.244, @cond_false.245 +@cond_true.244 + %.80 =w loadsw %.2 + jmp @cond_join.246 +@cond_false.245 + %.81 =w loadsw %.2 + %.82 =w loadsw %.4 + %.83 =w mul %.81, %.82 +@cond_join.246 + %.84 =w phi @cond_true.244 %.80, @cond_false.245 %.83 + ret %.84 +} +function w $safe_mod_func_int32_t_s_s(w %.1, w %.3) { +@start.271 + %.2 =l alloc4 4 + storew %.1, %.2 + %.4 =l alloc4 4 + storew %.3, %.4 +@body.272 + %.5 =w loadsw %.4 + %.6 =w ceqw %.5, 0 + %.7 =w cnew %.6, 0 + jnz %.7, @logic_join.277, @logic_right.276 +@logic_right.276 + %.8 =w loadsw %.2 + %.9 =w sub 0, 2147483647 + %.10 =w sub %.9, 1 + %.11 =w ceqw %.8, %.10 + %.12 =w cnew %.11, 0 + jnz %.12, @logic_right.278, @logic_join.279 +@logic_right.278 + %.13 =w loadsw %.4 + %.14 =w sub 0, 1 + %.15 =w ceqw %.13, %.14 + %.16 =w cnew %.15, 0 +@logic_join.279 + %.17 =w phi @logic_right.276 %.12, @logic_right.278 %.16 + %.18 =w cnew %.17, 0 +@logic_join.277 + %.19 =w phi @body.272 %.7, @logic_join.279 %.18 + %.20 =w cnew %.19, 0 + jnz %.20, @cond_true.273, @cond_false.274 +@cond_true.273 + %.21 =w loadsw %.2 + jmp @cond_join.275 +@cond_false.274 + %.22 =w loadsw %.2 + %.23 =w loadsw %.4 + %.24 =w rem %.22, %.23 +@cond_join.275 + %.25 =w phi @cond_true.273 %.21, @cond_false.274 %.24 + ret %.25 +} +function w $safe_div_func_int32_t_s_s(w %.1, w %.3) { +@start.280 + %.2 =l alloc4 4 + storew 
%.1, %.2 + %.4 =l alloc4 4 + storew %.3, %.4 +@body.281 + %.5 =w loadsw %.4 + %.6 =w ceqw %.5, 0 + %.7 =w cnew %.6, 0 + jnz %.7, @logic_join.286, @logic_right.285 +@logic_right.285 + %.8 =w loadsw %.2 + %.9 =w sub 0, 2147483647 + %.10 =w sub %.9, 1 + %.11 =w ceqw %.8, %.10 + %.12 =w cnew %.11, 0 + jnz %.12, @logic_right.287, @logic_join.288 +@logic_right.287 + %.13 =w loadsw %.4 + %.14 =w sub 0, 1 + %.15 =w ceqw %.13, %.14 + %.16 =w cnew %.15, 0 +@logic_join.288 + %.17 =w phi @logic_right.285 %.12, @logic_right.287 %.16 + %.18 =w cnew %.17, 0 +@logic_join.286 + %.19 =w phi @body.281 %.7, @logic_join.288 %.18 + %.20 =w cnew %.19, 0 + jnz %.20, @cond_true.282, @cond_false.283 +@cond_true.282 + %.21 =w loadsw %.2 + jmp @cond_join.284 +@cond_false.283 + %.22 =w loadsw %.2 + %.23 =w loadsw %.4 + %.24 =w div %.22, %.23 +@cond_join.284 + %.25 =w phi @cond_true.282 %.21, @cond_false.283 %.24 + ret %.25 +} +function w $safe_lshift_func_int32_t_s_s(w %.1, w %.3) { +@start.289 + %.2 =l alloc4 4 + storew %.1, %.2 + %.4 =l alloc4 4 + storew %.3, %.4 +@body.290 + %.5 =w loadsw %.2 + %.6 =w csltw %.5, 0 + %.7 =w cnew %.6, 0 + jnz %.7, @logic_join.299, @logic_right.298 +@logic_right.298 + %.8 =w loadsw %.4 + %.9 =w copy %.8 + %.10 =w csltw %.9, 0 + %.11 =w cnew %.10, 0 +@logic_join.299 + %.12 =w phi @body.290 %.7, @logic_right.298 %.11 + %.13 =w cnew %.12, 0 + jnz %.13, @logic_join.297, @logic_right.296 +@logic_right.296 + %.14 =w loadsw %.4 + %.15 =w copy %.14 + %.16 =w csgew %.15, 32 + %.17 =w cnew %.16, 0 +@logic_join.297 + %.18 =w phi @logic_join.299 %.13, @logic_right.296 %.17 + %.19 =w cnew %.18, 0 + jnz %.19, @logic_join.295, @logic_right.294 +@logic_right.294 + %.20 =w loadsw %.2 + %.21 =w loadsw %.4 + %.22 =w copy %.21 + %.23 =w sar 2147483647, %.22 + %.24 =w csgtw %.20, %.23 + %.25 =w cnew %.24, 0 +@logic_join.295 + %.26 =w phi @logic_join.297 %.19, @logic_right.294 %.25 + %.27 =w cnew %.26, 0 + jnz %.27, @cond_true.291, @cond_false.292 +@cond_true.291 + %.28 =w loadsw 
%.2 + jmp @cond_join.293 +@cond_false.292 + %.29 =w loadsw %.2 + %.30 =w loadsw %.4 + %.31 =w copy %.30 + %.32 =w shl %.29, %.31 +@cond_join.293 + %.33 =w phi @cond_true.291 %.28, @cond_false.292 %.32 + ret %.33 +} +function w $safe_lshift_func_int32_t_s_u(w %.1, w %.3) { +@start.300 + %.2 =l alloc4 4 + storew %.1, %.2 + %.4 =l alloc4 4 + storew %.3, %.4 +@body.301 + %.5 =w loadsw %.2 + %.6 =w csltw %.5, 0 + %.7 =w cnew %.6, 0 + jnz %.7, @logic_join.308, @logic_right.307 +@logic_right.307 + %.8 =w loaduw %.4 + %.9 =w copy %.8 + %.10 =w copy 32 + %.11 =w cugew %.9, %.10 + %.12 =w cnew %.11, 0 +@logic_join.308 + %.13 =w phi @body.301 %.7, @logic_right.307 %.12 + %.14 =w cnew %.13, 0 + jnz %.14, @logic_join.306, @logic_right.305 +@logic_right.305 + %.15 =w loadsw %.2 + %.16 =w loaduw %.4 + %.17 =w copy %.16 + %.18 =w sar 2147483647, %.17 + %.19 =w csgtw %.15, %.18 + %.20 =w cnew %.19, 0 +@logic_join.306 + %.21 =w phi @logic_join.308 %.14, @logic_right.305 %.20 + %.22 =w cnew %.21, 0 + jnz %.22, @cond_true.302, @cond_false.303 +@cond_true.302 + %.23 =w loadsw %.2 + jmp @cond_join.304 +@cond_false.303 + %.24 =w loadsw %.2 + %.25 =w loaduw %.4 + %.26 =w copy %.25 + %.27 =w shl %.24, %.26 +@cond_join.304 + %.28 =w phi @cond_true.302 %.23, @cond_false.303 %.27 + ret %.28 +} +function w $safe_rshift_func_int32_t_s_s(w %.1, w %.3) { +@start.309 + %.2 =l alloc4 4 + storew %.1, %.2 + %.4 =l alloc4 4 + storew %.3, %.4 +@body.310 + %.5 =w loadsw %.2 + %.6 =w csltw %.5, 0 + %.7 =w cnew %.6, 0 + jnz %.7, @logic_join.317, @logic_right.316 +@logic_right.316 + %.8 =w loadsw %.4 + %.9 =w copy %.8 + %.10 =w csltw %.9, 0 + %.11 =w cnew %.10, 0 +@logic_join.317 + %.12 =w phi @body.310 %.7, @logic_right.316 %.11 + %.13 =w cnew %.12, 0 + jnz %.13, @logic_join.315, @logic_right.314 +@logic_right.314 + %.14 =w loadsw %.4 + %.15 =w copy %.14 + %.16 =w csgew %.15, 32 + %.17 =w cnew %.16, 0 +@logic_join.315 + %.18 =w phi @logic_join.317 %.13, @logic_right.314 %.17 + %.19 =w cnew %.18, 0 + jnz 
%.19, @cond_true.311, @cond_false.312 +@cond_true.311 + %.20 =w loadsw %.2 + jmp @cond_join.313 +@cond_false.312 + %.21 =w loadsw %.2 + %.22 =w loadsw %.4 + %.23 =w copy %.22 + %.24 =w sar %.21, %.23 +@cond_join.313 + %.25 =w phi @cond_true.311 %.20, @cond_false.312 %.24 + ret %.25 +} +function w $safe_rshift_func_int32_t_s_u(w %.1, w %.3) { +@start.318 + %.2 =l alloc4 4 + storew %.1, %.2 + %.4 =l alloc4 4 + storew %.3, %.4 +@body.319 + %.5 =w loadsw %.2 + %.6 =w csltw %.5, 0 + %.7 =w cnew %.6, 0 + jnz %.7, @logic_join.324, @logic_right.323 +@logic_right.323 + %.8 =w loaduw %.4 + %.9 =w copy %.8 + %.10 =w copy 32 + %.11 =w cugew %.9, %.10 + %.12 =w cnew %.11, 0 +@logic_join.324 + %.13 =w phi @body.319 %.7, @logic_right.323 %.12 + %.14 =w cnew %.13, 0 + jnz %.14, @cond_true.320, @cond_false.321 +@cond_true.320 + %.15 =w loadsw %.2 + jmp @cond_join.322 +@cond_false.321 + %.16 =w loadsw %.2 + %.17 =w loaduw %.4 + %.18 =w copy %.17 + %.19 =w sar %.16, %.18 +@cond_join.322 + %.20 =w phi @cond_true.320 %.15, @cond_false.321 %.19 + ret %.20 +} +function l $safe_unary_minus_func_int64_t_s(l %.1) { +@start.325 + %.2 =l alloc8 8 + storel %.1, %.2 +@body.326 + %.3 =l loadl %.2 + %.4 =l extsw 0 + %.5 =l sub %.4, 9223372036854775807 + %.6 =l extsw 1 + %.7 =l sub %.5, %.6 + %.8 =w ceql %.3, %.7 + %.9 =w cnew %.8, 0 + jnz %.9, @cond_true.327, @cond_false.328 +@cond_true.327 + %.10 =l loadl %.2 + jmp @cond_join.329 +@cond_false.328 + %.11 =l extsw 0 + %.12 =l loadl %.2 + %.13 =l sub %.11, %.12 +@cond_join.329 + %.14 =l phi @cond_true.327 %.10, @cond_false.328 %.13 + ret %.14 +} +function l $safe_add_func_int64_t_s_s(l %.1, l %.3) { +@start.330 + %.2 =l alloc8 8 + storel %.1, %.2 + %.4 =l alloc8 8 + storel %.3, %.4 +@body.331 + %.5 =l loadl %.2 + %.6 =l extsw 0 + %.7 =w csgtl %.5, %.6 + %.8 =w cnew %.7, 0 + jnz %.8, @logic_right.339, @logic_join.340 +@logic_right.339 + %.9 =l loadl %.4 + %.10 =l extsw 0 + %.11 =w csgtl %.9, %.10 + %.12 =w cnew %.11, 0 +@logic_join.340 + %.13 =w phi 
@body.331 %.8, @logic_right.339 %.12 + %.14 =w cnew %.13, 0 + jnz %.14, @logic_right.337, @logic_join.338 +@logic_right.337 + %.15 =l loadl %.2 + %.16 =l loadl %.4 + %.17 =l sub 9223372036854775807, %.16 + %.18 =w csgtl %.15, %.17 + %.19 =w cnew %.18, 0 +@logic_join.338 + %.20 =w phi @logic_join.340 %.14, @logic_right.337 %.19 + %.21 =w cnew %.20, 0 + jnz %.21, @logic_join.336, @logic_right.335 +@logic_right.335 + %.22 =l loadl %.2 + %.23 =l extsw 0 + %.24 =w csltl %.22, %.23 + %.25 =w cnew %.24, 0 + jnz %.25, @logic_right.343, @logic_join.344 +@logic_right.343 + %.26 =l loadl %.4 + %.27 =l extsw 0 + %.28 =w csltl %.26, %.27 + %.29 =w cnew %.28, 0 +@logic_join.344 + %.30 =w phi @logic_right.335 %.25, @logic_right.343 %.29 + %.31 =w cnew %.30, 0 + jnz %.31, @logic_right.341, @logic_join.342 +@logic_right.341 + %.32 =l loadl %.2 + %.33 =l extsw 0 + %.34 =l sub %.33, 9223372036854775807 + %.35 =l extsw 1 + %.36 =l sub %.34, %.35 + %.37 =l loadl %.4 + %.38 =l sub %.36, %.37 + %.39 =w csltl %.32, %.38 + %.40 =w cnew %.39, 0 +@logic_join.342 + %.41 =w phi @logic_join.344 %.31, @logic_right.341 %.40 + %.42 =w cnew %.41, 0 +@logic_join.336 + %.43 =w phi @logic_join.338 %.21, @logic_join.342 %.42 + %.44 =w cnew %.43, 0 + jnz %.44, @cond_true.332, @cond_false.333 +@cond_true.332 + %.45 =l loadl %.2 + jmp @cond_join.334 +@cond_false.333 + %.46 =l loadl %.2 + %.47 =l loadl %.4 + %.48 =l add %.46, %.47 +@cond_join.334 + %.49 =l phi @cond_true.332 %.45, @cond_false.333 %.48 + ret %.49 +} +function l $safe_sub_func_int64_t_s_s(l %.1, l %.3) { +@start.345 + %.2 =l alloc8 8 + storel %.1, %.2 + %.4 =l alloc8 8 + storel %.3, %.4 +@body.346 + %.5 =l loadl %.2 + %.6 =l loadl %.4 + %.7 =l xor %.5, %.6 + %.8 =l loadl %.2 + %.9 =l loadl %.2 + %.10 =l loadl %.4 + %.11 =l xor %.9, %.10 + %.12 =l xor 9223372036854775807, 18446744073709551615 + %.13 =l and %.11, %.12 + %.14 =l xor %.8, %.13 + %.15 =l loadl %.4 + %.16 =l sub %.14, %.15 + %.17 =l loadl %.4 + %.18 =l xor %.16, %.17 + %.19 =l and 
%.7, %.18 + %.20 =l extsw 0 + %.21 =w csltl %.19, %.20 + %.22 =w cnew %.21, 0 + jnz %.22, @cond_true.347, @cond_false.348 +@cond_true.347 + %.23 =l loadl %.2 + jmp @cond_join.349 +@cond_false.348 + %.24 =l loadl %.2 + %.25 =l loadl %.4 + %.26 =l sub %.24, %.25 +@cond_join.349 + %.27 =l phi @cond_true.347 %.23, @cond_false.348 %.26 + ret %.27 +} +function l $safe_mul_func_int64_t_s_s(l %.1, l %.3) { +@start.350 + %.2 =l alloc8 8 + storel %.1, %.2 + %.4 =l alloc8 8 + storel %.3, %.4 +@body.351 + %.5 =l loadl %.2 + %.6 =l extsw 0 + %.7 =w csgtl %.5, %.6 + %.8 =w cnew %.7, 0 + jnz %.8, @logic_right.363, @logic_join.364 +@logic_right.363 + %.9 =l loadl %.4 + %.10 =l extsw 0 + %.11 =w csgtl %.9, %.10 + %.12 =w cnew %.11, 0 +@logic_join.364 + %.13 =w phi @body.351 %.8, @logic_right.363 %.12 + %.14 =w cnew %.13, 0 + jnz %.14, @logic_right.361, @logic_join.362 +@logic_right.361 + %.15 =l loadl %.2 + %.16 =l loadl %.4 + %.17 =l div 9223372036854775807, %.16 + %.18 =w csgtl %.15, %.17 + %.19 =w cnew %.18, 0 +@logic_join.362 + %.20 =w phi @logic_join.364 %.14, @logic_right.361 %.19 + %.21 =w cnew %.20, 0 + jnz %.21, @logic_join.360, @logic_right.359 +@logic_right.359 + %.22 =l loadl %.2 + %.23 =l extsw 0 + %.24 =w csgtl %.22, %.23 + %.25 =w cnew %.24, 0 + jnz %.25, @logic_right.367, @logic_join.368 +@logic_right.367 + %.26 =l loadl %.4 + %.27 =l extsw 0 + %.28 =w cslel %.26, %.27 + %.29 =w cnew %.28, 0 +@logic_join.368 + %.30 =w phi @logic_right.359 %.25, @logic_right.367 %.29 + %.31 =w cnew %.30, 0 + jnz %.31, @logic_right.365, @logic_join.366 +@logic_right.365 + %.32 =l loadl %.4 + %.33 =l extsw 0 + %.34 =l sub %.33, 9223372036854775807 + %.35 =l extsw 1 + %.36 =l sub %.34, %.35 + %.37 =l loadl %.2 + %.38 =l div %.36, %.37 + %.39 =w csltl %.32, %.38 + %.40 =w cnew %.39, 0 +@logic_join.366 + %.41 =w phi @logic_join.368 %.31, @logic_right.365 %.40 + %.42 =w cnew %.41, 0 +@logic_join.360 + %.43 =w phi @logic_join.362 %.21, @logic_join.366 %.42 + %.44 =w cnew %.43, 0 + jnz %.44, 
@logic_join.358, @logic_right.357 +@logic_right.357 + %.45 =l loadl %.2 + %.46 =l extsw 0 + %.47 =w cslel %.45, %.46 + %.48 =w cnew %.47, 0 + jnz %.48, @logic_right.371, @logic_join.372 +@logic_right.371 + %.49 =l loadl %.4 + %.50 =l extsw 0 + %.51 =w csgtl %.49, %.50 + %.52 =w cnew %.51, 0 +@logic_join.372 + %.53 =w phi @logic_right.357 %.48, @logic_right.371 %.52 + %.54 =w cnew %.53, 0 + jnz %.54, @logic_right.369, @logic_join.370 +@logic_right.369 + %.55 =l loadl %.2 + %.56 =l extsw 0 + %.57 =l sub %.56, 9223372036854775807 + %.58 =l extsw 1 + %.59 =l sub %.57, %.58 + %.60 =l loadl %.4 + %.61 =l div %.59, %.60 + %.62 =w csltl %.55, %.61 + %.63 =w cnew %.62, 0 +@logic_join.370 + %.64 =w phi @logic_join.372 %.54, @logic_right.369 %.63 + %.65 =w cnew %.64, 0 +@logic_join.358 + %.66 =w phi @logic_join.360 %.44, @logic_join.370 %.65 + %.67 =w cnew %.66, 0 + jnz %.67, @logic_join.356, @logic_right.355 +@logic_right.355 + %.68 =l loadl %.2 + %.69 =l extsw 0 + %.70 =w cslel %.68, %.69 + %.71 =w cnew %.70, 0 + jnz %.71, @logic_right.377, @logic_join.378 +@logic_right.377 + %.72 =l loadl %.4 + %.73 =l extsw 0 + %.74 =w cslel %.72, %.73 + %.75 =w cnew %.74, 0 +@logic_join.378 + %.76 =w phi @logic_right.355 %.71, @logic_right.377 %.75 + %.77 =w cnew %.76, 0 + jnz %.77, @logic_right.375, @logic_join.376 +@logic_right.375 + %.78 =l loadl %.2 + %.79 =l extsw 0 + %.80 =w cnel %.78, %.79 + %.81 =w cnew %.80, 0 +@logic_join.376 + %.82 =w phi @logic_join.378 %.77, @logic_right.375 %.81 + %.83 =w cnew %.82, 0 + jnz %.83, @logic_right.373, @logic_join.374 +@logic_right.373 + %.84 =l loadl %.4 + %.85 =l loadl %.2 + %.86 =l div 9223372036854775807, %.85 + %.87 =w csltl %.84, %.86 + %.88 =w cnew %.87, 0 +@logic_join.374 + %.89 =w phi @logic_join.376 %.83, @logic_right.373 %.88 + %.90 =w cnew %.89, 0 +@logic_join.356 + %.91 =w phi @logic_join.358 %.67, @logic_join.374 %.90 + %.92 =w cnew %.91, 0 + jnz %.92, @cond_true.352, @cond_false.353 +@cond_true.352 + %.93 =l loadl %.2 + jmp 
@cond_join.354 +@cond_false.353 + %.94 =l loadl %.2 + %.95 =l loadl %.4 + %.96 =l mul %.94, %.95 +@cond_join.354 + %.97 =l phi @cond_true.352 %.93, @cond_false.353 %.96 + ret %.97 +} +function l $safe_mod_func_int64_t_s_s(l %.1, l %.3) { +@start.379 + %.2 =l alloc8 8 + storel %.1, %.2 + %.4 =l alloc8 8 + storel %.3, %.4 +@body.380 + %.5 =l loadl %.4 + %.6 =l extsw 0 + %.7 =w ceql %.5, %.6 + %.8 =w cnew %.7, 0 + jnz %.8, @logic_join.385, @logic_right.384 +@logic_right.384 + %.9 =l loadl %.2 + %.10 =l extsw 0 + %.11 =l sub %.10, 9223372036854775807 + %.12 =l extsw 1 + %.13 =l sub %.11, %.12 + %.14 =w ceql %.9, %.13 + %.15 =w cnew %.14, 0 + jnz %.15, @logic_right.386, @logic_join.387 +@logic_right.386 + %.16 =l loadl %.4 + %.17 =w sub 0, 1 + %.18 =l extsw %.17 + %.19 =w ceql %.16, %.18 + %.20 =w cnew %.19, 0 +@logic_join.387 + %.21 =w phi @logic_right.384 %.15, @logic_right.386 %.20 + %.22 =w cnew %.21, 0 +@logic_join.385 + %.23 =w phi @body.380 %.8, @logic_join.387 %.22 + %.24 =w cnew %.23, 0 + jnz %.24, @cond_true.381, @cond_false.382 +@cond_true.381 + %.25 =l loadl %.2 + jmp @cond_join.383 +@cond_false.382 + %.26 =l loadl %.2 + %.27 =l loadl %.4 + %.28 =l rem %.26, %.27 +@cond_join.383 + %.29 =l phi @cond_true.381 %.25, @cond_false.382 %.28 + ret %.29 +} +function l $safe_div_func_int64_t_s_s(l %.1, l %.3) { +@start.388 + %.2 =l alloc8 8 + storel %.1, %.2 + %.4 =l alloc8 8 + storel %.3, %.4 +@body.389 + %.5 =l loadl %.4 + %.6 =l extsw 0 + %.7 =w ceql %.5, %.6 + %.8 =w cnew %.7, 0 + jnz %.8, @logic_join.394, @logic_right.393 +@logic_right.393 + %.9 =l loadl %.2 + %.10 =l extsw 0 + %.11 =l sub %.10, 9223372036854775807 + %.12 =l extsw 1 + %.13 =l sub %.11, %.12 + %.14 =w ceql %.9, %.13 + %.15 =w cnew %.14, 0 + jnz %.15, @logic_right.395, @logic_join.396 +@logic_right.395 + %.16 =l loadl %.4 + %.17 =w sub 0, 1 + %.18 =l extsw %.17 + %.19 =w ceql %.16, %.18 + %.20 =w cnew %.19, 0 +@logic_join.396 + %.21 =w phi @logic_right.393 %.15, @logic_right.395 %.20 + %.22 =w cnew 
%.21, 0 +@logic_join.394 + %.23 =w phi @body.389 %.8, @logic_join.396 %.22 + %.24 =w cnew %.23, 0 + jnz %.24, @cond_true.390, @cond_false.391 +@cond_true.390 + %.25 =l loadl %.2 + jmp @cond_join.392 +@cond_false.391 + %.26 =l loadl %.2 + %.27 =l loadl %.4 + %.28 =l div %.26, %.27 +@cond_join.392 + %.29 =l phi @cond_true.390 %.25, @cond_false.391 %.28 + ret %.29 +} +function l $safe_lshift_func_int64_t_s_s(l %.1, w %.3) { +@start.397 + %.2 =l alloc8 8 + storel %.1, %.2 + %.4 =l alloc4 4 + storew %.3, %.4 +@body.398 + %.5 =l loadl %.2 + %.6 =l extsw 0 + %.7 =w csltl %.5, %.6 + %.8 =w cnew %.7, 0 + jnz %.8, @logic_join.407, @logic_right.406 +@logic_right.406 + %.9 =w loadsw %.4 + %.10 =w copy %.9 + %.11 =w csltw %.10, 0 + %.12 =w cnew %.11, 0 +@logic_join.407 + %.13 =w phi @body.398 %.8, @logic_right.406 %.12 + %.14 =w cnew %.13, 0 + jnz %.14, @logic_join.405, @logic_right.404 +@logic_right.404 + %.15 =w loadsw %.4 + %.16 =w copy %.15 + %.17 =w csgew %.16, 32 + %.18 =w cnew %.17, 0 +@logic_join.405 + %.19 =w phi @logic_join.407 %.14, @logic_right.404 %.18 + %.20 =w cnew %.19, 0 + jnz %.20, @logic_join.403, @logic_right.402 +@logic_right.402 + %.21 =l loadl %.2 + %.22 =w loadsw %.4 + %.23 =w copy %.22 + %.24 =l sar 9223372036854775807, %.23 + %.25 =w csgtl %.21, %.24 + %.26 =w cnew %.25, 0 +@logic_join.403 + %.27 =w phi @logic_join.405 %.20, @logic_right.402 %.26 + %.28 =w cnew %.27, 0 + jnz %.28, @cond_true.399, @cond_false.400 +@cond_true.399 + %.29 =l loadl %.2 + jmp @cond_join.401 +@cond_false.400 + %.30 =l loadl %.2 + %.31 =w loadsw %.4 + %.32 =w copy %.31 + %.33 =l shl %.30, %.32 +@cond_join.401 + %.34 =l phi @cond_true.399 %.29, @cond_false.400 %.33 + ret %.34 +} +function l $safe_lshift_func_int64_t_s_u(l %.1, w %.3) { +@start.408 + %.2 =l alloc8 8 + storel %.1, %.2 + %.4 =l alloc4 4 + storew %.3, %.4 +@body.409 + %.5 =l loadl %.2 + %.6 =l extsw 0 + %.7 =w csltl %.5, %.6 + %.8 =w cnew %.7, 0 + jnz %.8, @logic_join.416, @logic_right.415 +@logic_right.415 + %.9 
=w loaduw %.4 + %.10 =w copy %.9 + %.11 =w copy 32 + %.12 =w cugew %.10, %.11 + %.13 =w cnew %.12, 0 +@logic_join.416 + %.14 =w phi @body.409 %.8, @logic_right.415 %.13 + %.15 =w cnew %.14, 0 + jnz %.15, @logic_join.414, @logic_right.413 +@logic_right.413 + %.16 =l loadl %.2 + %.17 =w loaduw %.4 + %.18 =w copy %.17 + %.19 =l sar 9223372036854775807, %.18 + %.20 =w csgtl %.16, %.19 + %.21 =w cnew %.20, 0 +@logic_join.414 + %.22 =w phi @logic_join.416 %.15, @logic_right.413 %.21 + %.23 =w cnew %.22, 0 + jnz %.23, @cond_true.410, @cond_false.411 +@cond_true.410 + %.24 =l loadl %.2 + jmp @cond_join.412 +@cond_false.411 + %.25 =l loadl %.2 + %.26 =w loaduw %.4 + %.27 =w copy %.26 + %.28 =l shl %.25, %.27 +@cond_join.412 + %.29 =l phi @cond_true.410 %.24, @cond_false.411 %.28 + ret %.29 +} +function l $safe_rshift_func_int64_t_s_s(l %.1, w %.3) { +@start.417 + %.2 =l alloc8 8 + storel %.1, %.2 + %.4 =l alloc4 4 + storew %.3, %.4 +@body.418 + %.5 =l loadl %.2 + %.6 =l extsw 0 + %.7 =w csltl %.5, %.6 + %.8 =w cnew %.7, 0 + jnz %.8, @logic_join.425, @logic_right.424 +@logic_right.424 + %.9 =w loadsw %.4 + %.10 =w copy %.9 + %.11 =w csltw %.10, 0 + %.12 =w cnew %.11, 0 +@logic_join.425 + %.13 =w phi @body.418 %.8, @logic_right.424 %.12 + %.14 =w cnew %.13, 0 + jnz %.14, @logic_join.423, @logic_right.422 +@logic_right.422 + %.15 =w loadsw %.4 + %.16 =w copy %.15 + %.17 =w csgew %.16, 32 + %.18 =w cnew %.17, 0 +@logic_join.423 + %.19 =w phi @logic_join.425 %.14, @logic_right.422 %.18 + %.20 =w cnew %.19, 0 + jnz %.20, @cond_true.419, @cond_false.420 +@cond_true.419 + %.21 =l loadl %.2 + jmp @cond_join.421 +@cond_false.420 + %.22 =l loadl %.2 + %.23 =w loadsw %.4 + %.24 =w copy %.23 + %.25 =l sar %.22, %.24 +@cond_join.421 + %.26 =l phi @cond_true.419 %.21, @cond_false.420 %.25 + ret %.26 +} +function l $safe_rshift_func_int64_t_s_u(l %.1, w %.3) { +@start.426 + %.2 =l alloc8 8 + storel %.1, %.2 + %.4 =l alloc4 4 + storew %.3, %.4 +@body.427 + %.5 =l loadl %.2 + %.6 =l extsw 0 
+ %.7 =w csltl %.5, %.6 + %.8 =w cnew %.7, 0 + jnz %.8, @logic_join.432, @logic_right.431 +@logic_right.431 + %.9 =w loaduw %.4 + %.10 =w copy %.9 + %.11 =w copy 32 + %.12 =w cugew %.10, %.11 + %.13 =w cnew %.12, 0 +@logic_join.432 + %.14 =w phi @body.427 %.8, @logic_right.431 %.13 + %.15 =w cnew %.14, 0 + jnz %.15, @cond_true.428, @cond_false.429 +@cond_true.428 + %.16 =l loadl %.2 + jmp @cond_join.430 +@cond_false.429 + %.17 =l loadl %.2 + %.18 =w loaduw %.4 + %.19 =w copy %.18 + %.20 =l sar %.17, %.19 +@cond_join.430 + %.21 =l phi @cond_true.428 %.16, @cond_false.429 %.20 + ret %.21 +} +function w $safe_unary_minus_func_uint8_t_u(w %.1) { +@start.433 + %.2 =l alloc4 1 + storeb %.1, %.2 +@body.434 + %.3 =w loadub %.2 + %.4 =w extub %.3 + %.5 =w sub 0, %.4 + %.6 =w copy %.5 + ret %.6 +} +function w $safe_add_func_uint8_t_u_u(w %.1, w %.3) { +@start.435 + %.2 =l alloc4 1 + storeb %.1, %.2 + %.4 =l alloc4 1 + storeb %.3, %.4 +@body.436 + %.5 =w loadub %.2 + %.6 =w loadub %.4 + %.7 =w add %.5, %.6 + ret %.7 +} +function w $safe_sub_func_uint8_t_u_u(w %.1, w %.3) { +@start.437 + %.2 =l alloc4 1 + storeb %.1, %.2 + %.4 =l alloc4 1 + storeb %.3, %.4 +@body.438 + %.5 =w loadub %.2 + %.6 =w loadub %.4 + %.7 =w sub %.5, %.6 + ret %.7 +} +function w $safe_mul_func_uint8_t_u_u(w %.1, w %.3) { +@start.439 + %.2 =l alloc4 1 + storeb %.1, %.2 + %.4 =l alloc4 1 + storeb %.3, %.4 +@body.440 + %.5 =w loadub %.2 + %.6 =w extub %.5 + %.7 =w loadub %.4 + %.8 =w extub %.7 + %.9 =w mul %.6, %.8 + %.10 =w copy %.9 + ret %.10 +} +function w $safe_mod_func_uint8_t_u_u(w %.1, w %.3) { +@start.441 + %.2 =l alloc4 1 + storeb %.1, %.2 + %.4 =l alloc4 1 + storeb %.3, %.4 +@body.442 + %.5 =w loadub %.4 + %.6 =w extub %.5 + %.7 =w ceqw %.6, 0 + %.8 =w cnew %.7, 0 + jnz %.8, @cond_true.443, @cond_false.444 +@cond_true.443 + %.9 =w loadub %.2 + jmp @cond_join.445 +@cond_false.444 + %.10 =w loadub %.2 + %.11 =w loadub %.4 + %.12 =w urem %.10, %.11 +@cond_join.445 + %.13 =w phi @cond_true.443 %.9, 
@cond_false.444 %.12 + ret %.13 +} +function w $safe_div_func_uint8_t_u_u(w %.1, w %.3) { +@start.446 + %.2 =l alloc4 1 + storeb %.1, %.2 + %.4 =l alloc4 1 + storeb %.3, %.4 +@body.447 + %.5 =w loadub %.4 + %.6 =w extub %.5 + %.7 =w ceqw %.6, 0 + %.8 =w cnew %.7, 0 + jnz %.8, @cond_true.448, @cond_false.449 +@cond_true.448 + %.9 =w loadub %.2 + jmp @cond_join.450 +@cond_false.449 + %.10 =w loadub %.2 + %.11 =w loadub %.4 + %.12 =w udiv %.10, %.11 +@cond_join.450 + %.13 =w phi @cond_true.448 %.9, @cond_false.449 %.12 + ret %.13 +} +function w $safe_lshift_func_uint8_t_u_s(w %.1, w %.3) { +@start.451 + %.2 =l alloc4 1 + storeb %.1, %.2 + %.4 =l alloc4 4 + storew %.3, %.4 +@body.452 + %.5 =w loadsw %.4 + %.6 =w copy %.5 + %.7 =w csltw %.6, 0 + %.8 =w cnew %.7, 0 + jnz %.8, @logic_join.459, @logic_right.458 +@logic_right.458 + %.9 =w loadsw %.4 + %.10 =w copy %.9 + %.11 =w csgew %.10, 32 + %.12 =w cnew %.11, 0 +@logic_join.459 + %.13 =w phi @body.452 %.8, @logic_right.458 %.12 + %.14 =w cnew %.13, 0 + jnz %.14, @logic_join.457, @logic_right.456 +@logic_right.456 + %.15 =w loadub %.2 + %.16 =w extub %.15 + %.17 =w loadsw %.4 + %.18 =w copy %.17 + %.19 =w sar 255, %.18 + %.20 =w csgtw %.16, %.19 + %.21 =w cnew %.20, 0 +@logic_join.457 + %.22 =w phi @logic_join.459 %.14, @logic_right.456 %.21 + %.23 =w cnew %.22, 0 + jnz %.23, @cond_true.453, @cond_false.454 +@cond_true.453 + %.24 =w loadub %.2 + %.25 =w extub %.24 + jmp @cond_join.455 +@cond_false.454 + %.26 =w loadub %.2 + %.27 =w extub %.26 + %.28 =w loadsw %.4 + %.29 =w copy %.28 + %.30 =w shl %.27, %.29 +@cond_join.455 + %.31 =w phi @cond_true.453 %.25, @cond_false.454 %.30 + %.32 =w copy %.31 + ret %.32 +} +function w $safe_lshift_func_uint8_t_u_u(w %.1, w %.3) { +@start.460 + %.2 =l alloc4 1 + storeb %.1, %.2 + %.4 =l alloc4 4 + storew %.3, %.4 +@body.461 + %.5 =w loaduw %.4 + %.6 =w copy %.5 + %.7 =w copy 32 + %.8 =w cugew %.6, %.7 + %.9 =w cnew %.8, 0 + jnz %.9, @logic_join.466, @logic_right.465 +@logic_right.465 
+ %.10 =w loadub %.2 + %.11 =w extub %.10 + %.12 =w loaduw %.4 + %.13 =w copy %.12 + %.14 =w sar 255, %.13 + %.15 =w csgtw %.11, %.14 + %.16 =w cnew %.15, 0 +@logic_join.466 + %.17 =w phi @body.461 %.9, @logic_right.465 %.16 + %.18 =w cnew %.17, 0 + jnz %.18, @cond_true.462, @cond_false.463 +@cond_true.462 + %.19 =w loadub %.2 + %.20 =w extub %.19 + jmp @cond_join.464 +@cond_false.463 + %.21 =w loadub %.2 + %.22 =w extub %.21 + %.23 =w loaduw %.4 + %.24 =w copy %.23 + %.25 =w shl %.22, %.24 +@cond_join.464 + %.26 =w phi @cond_true.462 %.20, @cond_false.463 %.25 + %.27 =w copy %.26 + ret %.27 +} +function w $safe_rshift_func_uint8_t_u_s(w %.1, w %.3) { +@start.467 + %.2 =l alloc4 1 + storeb %.1, %.2 + %.4 =l alloc4 4 + storew %.3, %.4 +@body.468 + %.5 =w loadsw %.4 + %.6 =w copy %.5 + %.7 =w csltw %.6, 0 + %.8 =w cnew %.7, 0 + jnz %.8, @logic_join.473, @logic_right.472 +@logic_right.472 + %.9 =w loadsw %.4 + %.10 =w copy %.9 + %.11 =w csgew %.10, 32 + %.12 =w cnew %.11, 0 +@logic_join.473 + %.13 =w phi @body.468 %.8, @logic_right.472 %.12 + %.14 =w cnew %.13, 0 + jnz %.14, @cond_true.469, @cond_false.470 +@cond_true.469 + %.15 =w loadub %.2 + %.16 =w extub %.15 + jmp @cond_join.471 +@cond_false.470 + %.17 =w loadub %.2 + %.18 =w extub %.17 + %.19 =w loadsw %.4 + %.20 =w copy %.19 + %.21 =w sar %.18, %.20 +@cond_join.471 + %.22 =w phi @cond_true.469 %.16, @cond_false.470 %.21 + %.23 =w copy %.22 + ret %.23 +} +function w $safe_rshift_func_uint8_t_u_u(w %.1, w %.3) { +@start.474 + %.2 =l alloc4 1 + storeb %.1, %.2 + %.4 =l alloc4 4 + storew %.3, %.4 +@body.475 + %.5 =w loaduw %.4 + %.6 =w copy %.5 + %.7 =w copy 32 + %.8 =w cugew %.6, %.7 + %.9 =w cnew %.8, 0 + jnz %.9, @cond_true.476, @cond_false.477 +@cond_true.476 + %.10 =w loadub %.2 + %.11 =w extub %.10 + jmp @cond_join.478 +@cond_false.477 + %.12 =w loadub %.2 + %.13 =w extub %.12 + %.14 =w loaduw %.4 + %.15 =w copy %.14 + %.16 =w sar %.13, %.15 +@cond_join.478 + %.17 =w phi @cond_true.476 %.11, @cond_false.477 
%.16 + %.18 =w copy %.17 + ret %.18 +} +function w $safe_unary_minus_func_uint16_t_u(w %.1) { +@start.479 + %.2 =l alloc4 2 + storeh %.1, %.2 +@body.480 + %.3 =w loaduh %.2 + %.4 =w extuh %.3 + %.5 =w sub 0, %.4 + %.6 =w copy %.5 + ret %.6 +} +function w $safe_add_func_uint16_t_u_u(w %.1, w %.3) { +@start.481 + %.2 =l alloc4 2 + storeh %.1, %.2 + %.4 =l alloc4 2 + storeh %.3, %.4 +@body.482 + %.5 =w loaduh %.2 + %.6 =w loaduh %.4 + %.7 =w add %.5, %.6 + ret %.7 +} +function w $safe_sub_func_uint16_t_u_u(w %.1, w %.3) { +@start.483 + %.2 =l alloc4 2 + storeh %.1, %.2 + %.4 =l alloc4 2 + storeh %.3, %.4 +@body.484 + %.5 =w loaduh %.2 + %.6 =w loaduh %.4 + %.7 =w sub %.5, %.6 + ret %.7 +} +function w $safe_mul_func_uint16_t_u_u(w %.1, w %.3) { +@start.485 + %.2 =l alloc4 2 + storeh %.1, %.2 + %.4 =l alloc4 2 + storeh %.3, %.4 +@body.486 + %.5 =w loaduh %.2 + %.6 =w extuh %.5 + %.7 =w loaduh %.4 + %.8 =w extuh %.7 + %.9 =w mul %.6, %.8 + %.10 =w copy %.9 + ret %.10 +} +function w $safe_mod_func_uint16_t_u_u(w %.1, w %.3) { +@start.487 + %.2 =l alloc4 2 + storeh %.1, %.2 + %.4 =l alloc4 2 + storeh %.3, %.4 +@body.488 + %.5 =w loaduh %.4 + %.6 =w extuh %.5 + %.7 =w ceqw %.6, 0 + %.8 =w cnew %.7, 0 + jnz %.8, @cond_true.489, @cond_false.490 +@cond_true.489 + %.9 =w loaduh %.2 + jmp @cond_join.491 +@cond_false.490 + %.10 =w loaduh %.2 + %.11 =w loaduh %.4 + %.12 =w urem %.10, %.11 +@cond_join.491 + %.13 =w phi @cond_true.489 %.9, @cond_false.490 %.12 + ret %.13 +} +function w $safe_div_func_uint16_t_u_u(w %.1, w %.3) { +@start.492 + %.2 =l alloc4 2 + storeh %.1, %.2 + %.4 =l alloc4 2 + storeh %.3, %.4 +@body.493 + %.5 =w loaduh %.4 + %.6 =w extuh %.5 + %.7 =w ceqw %.6, 0 + %.8 =w cnew %.7, 0 + jnz %.8, @cond_true.494, @cond_false.495 +@cond_true.494 + %.9 =w loaduh %.2 + jmp @cond_join.496 +@cond_false.495 + %.10 =w loaduh %.2 + %.11 =w loaduh %.4 + %.12 =w udiv %.10, %.11 +@cond_join.496 + %.13 =w phi @cond_true.494 %.9, @cond_false.495 %.12 + ret %.13 +} +function w 
$safe_lshift_func_uint16_t_u_s(w %.1, w %.3) { +@start.497 + %.2 =l alloc4 2 + storeh %.1, %.2 + %.4 =l alloc4 4 + storew %.3, %.4 +@body.498 + %.5 =w loadsw %.4 + %.6 =w copy %.5 + %.7 =w csltw %.6, 0 + %.8 =w cnew %.7, 0 + jnz %.8, @logic_join.505, @logic_right.504 +@logic_right.504 + %.9 =w loadsw %.4 + %.10 =w copy %.9 + %.11 =w csgew %.10, 32 + %.12 =w cnew %.11, 0 +@logic_join.505 + %.13 =w phi @body.498 %.8, @logic_right.504 %.12 + %.14 =w cnew %.13, 0 + jnz %.14, @logic_join.503, @logic_right.502 +@logic_right.502 + %.15 =w loaduh %.2 + %.16 =w extuh %.15 + %.17 =w loadsw %.4 + %.18 =w copy %.17 + %.19 =w sar 65535, %.18 + %.20 =w csgtw %.16, %.19 + %.21 =w cnew %.20, 0 +@logic_join.503 + %.22 =w phi @logic_join.505 %.14, @logic_right.502 %.21 + %.23 =w cnew %.22, 0 + jnz %.23, @cond_true.499, @cond_false.500 +@cond_true.499 + %.24 =w loaduh %.2 + %.25 =w extuh %.24 + jmp @cond_join.501 +@cond_false.500 + %.26 =w loaduh %.2 + %.27 =w extuh %.26 + %.28 =w loadsw %.4 + %.29 =w copy %.28 + %.30 =w shl %.27, %.29 +@cond_join.501 + %.31 =w phi @cond_true.499 %.25, @cond_false.500 %.30 + %.32 =w copy %.31 + ret %.32 +} +function w $safe_lshift_func_uint16_t_u_u(w %.1, w %.3) { +@start.506 + %.2 =l alloc4 2 + storeh %.1, %.2 + %.4 =l alloc4 4 + storew %.3, %.4 +@body.507 + %.5 =w loaduw %.4 + %.6 =w copy %.5 + %.7 =w copy 32 + %.8 =w cugew %.6, %.7 + %.9 =w cnew %.8, 0 + jnz %.9, @logic_join.512, @logic_right.511 +@logic_right.511 + %.10 =w loaduh %.2 + %.11 =w extuh %.10 + %.12 =w loaduw %.4 + %.13 =w copy %.12 + %.14 =w sar 65535, %.13 + %.15 =w csgtw %.11, %.14 + %.16 =w cnew %.15, 0 +@logic_join.512 + %.17 =w phi @body.507 %.9, @logic_right.511 %.16 + %.18 =w cnew %.17, 0 + jnz %.18, @cond_true.508, @cond_false.509 +@cond_true.508 + %.19 =w loaduh %.2 + %.20 =w extuh %.19 + jmp @cond_join.510 +@cond_false.509 + %.21 =w loaduh %.2 + %.22 =w extuh %.21 + %.23 =w loaduw %.4 + %.24 =w copy %.23 + %.25 =w shl %.22, %.24 +@cond_join.510 + %.26 =w phi @cond_true.508 
%.20, @cond_false.509 %.25 + %.27 =w copy %.26 + ret %.27 +} +function w $safe_rshift_func_uint16_t_u_s(w %.1, w %.3) { +@start.513 + %.2 =l alloc4 2 + storeh %.1, %.2 + %.4 =l alloc4 4 + storew %.3, %.4 +@body.514 + %.5 =w loadsw %.4 + %.6 =w copy %.5 + %.7 =w csltw %.6, 0 + %.8 =w cnew %.7, 0 + jnz %.8, @logic_join.519, @logic_right.518 +@logic_right.518 + %.9 =w loadsw %.4 + %.10 =w copy %.9 + %.11 =w csgew %.10, 32 + %.12 =w cnew %.11, 0 +@logic_join.519 + %.13 =w phi @body.514 %.8, @logic_right.518 %.12 + %.14 =w cnew %.13, 0 + jnz %.14, @cond_true.515, @cond_false.516 +@cond_true.515 + %.15 =w loaduh %.2 + %.16 =w extuh %.15 + jmp @cond_join.517 +@cond_false.516 + %.17 =w loaduh %.2 + %.18 =w extuh %.17 + %.19 =w loadsw %.4 + %.20 =w copy %.19 + %.21 =w sar %.18, %.20 +@cond_join.517 + %.22 =w phi @cond_true.515 %.16, @cond_false.516 %.21 + %.23 =w copy %.22 + ret %.23 +} +function w $safe_rshift_func_uint16_t_u_u(w %.1, w %.3) { +@start.520 + %.2 =l alloc4 2 + storeh %.1, %.2 + %.4 =l alloc4 4 + storew %.3, %.4 +@body.521 + %.5 =w loaduw %.4 + %.6 =w copy %.5 + %.7 =w copy 32 + %.8 =w cugew %.6, %.7 + %.9 =w cnew %.8, 0 + jnz %.9, @cond_true.522, @cond_false.523 +@cond_true.522 + %.10 =w loaduh %.2 + %.11 =w extuh %.10 + jmp @cond_join.524 +@cond_false.523 + %.12 =w loaduh %.2 + %.13 =w extuh %.12 + %.14 =w loaduw %.4 + %.15 =w copy %.14 + %.16 =w sar %.13, %.15 +@cond_join.524 + %.17 =w phi @cond_true.522 %.11, @cond_false.523 %.16 + %.18 =w copy %.17 + ret %.18 +} +function w $safe_unary_minus_func_uint32_t_u(w %.1) { +@start.525 + %.2 =l alloc4 4 + storew %.1, %.2 +@body.526 + %.3 =w copy 0 + %.4 =w loaduw %.2 + %.5 =w sub %.3, %.4 + ret %.5 +} +function w $safe_add_func_uint32_t_u_u(w %.1, w %.3) { +@start.527 + %.2 =l alloc4 4 + storew %.1, %.2 + %.4 =l alloc4 4 + storew %.3, %.4 +@body.528 + %.5 =w loaduw %.2 + %.6 =w loaduw %.4 + %.7 =w add %.5, %.6 + ret %.7 +} +function w $safe_sub_func_uint32_t_u_u(w %.1, w %.3) { +@start.529 + %.2 =l alloc4 4 + 
storew %.1, %.2 + %.4 =l alloc4 4 + storew %.3, %.4 +@body.530 + %.5 =w loaduw %.2 + %.6 =w loaduw %.4 + %.7 =w sub %.5, %.6 + ret %.7 +} +function w $safe_mul_func_uint32_t_u_u(w %.1, w %.3) { +@start.531 + %.2 =l alloc4 4 + storew %.1, %.2 + %.4 =l alloc4 4 + storew %.3, %.4 +@body.532 + %.5 =w loaduw %.2 + %.6 =w copy %.5 + %.7 =w loaduw %.4 + %.8 =w copy %.7 + %.9 =w mul %.6, %.8 + ret %.9 +} +function w $safe_mod_func_uint32_t_u_u(w %.1, w %.3) { +@start.533 + %.2 =l alloc4 4 + storew %.1, %.2 + %.4 =l alloc4 4 + storew %.3, %.4 +@body.534 + %.5 =w loaduw %.4 + %.6 =w copy 0 + %.7 =w ceqw %.5, %.6 + %.8 =w cnew %.7, 0 + jnz %.8, @cond_true.535, @cond_false.536 +@cond_true.535 + %.9 =w loaduw %.2 + jmp @cond_join.537 +@cond_false.536 + %.10 =w loaduw %.2 + %.11 =w loaduw %.4 + %.12 =w urem %.10, %.11 +@cond_join.537 + %.13 =w phi @cond_true.535 %.9, @cond_false.536 %.12 + ret %.13 +} +function w $safe_div_func_uint32_t_u_u(w %.1, w %.3) { +@start.538 + %.2 =l alloc4 4 + storew %.1, %.2 + %.4 =l alloc4 4 + storew %.3, %.4 +@body.539 + %.5 =w loaduw %.4 + %.6 =w copy 0 + %.7 =w ceqw %.5, %.6 + %.8 =w cnew %.7, 0 + jnz %.8, @cond_true.540, @cond_false.541 +@cond_true.540 + %.9 =w loaduw %.2 + jmp @cond_join.542 +@cond_false.541 + %.10 =w loaduw %.2 + %.11 =w loaduw %.4 + %.12 =w udiv %.10, %.11 +@cond_join.542 + %.13 =w phi @cond_true.540 %.9, @cond_false.541 %.12 + ret %.13 +} +function w $safe_lshift_func_uint32_t_u_s(w %.1, w %.3) { +@start.543 + %.2 =l alloc4 4 + storew %.1, %.2 + %.4 =l alloc4 4 + storew %.3, %.4 +@body.544 + %.5 =w loadsw %.4 + %.6 =w copy %.5 + %.7 =w csltw %.6, 0 + %.8 =w cnew %.7, 0 + jnz %.8, @logic_join.551, @logic_right.550 +@logic_right.550 + %.9 =w loadsw %.4 + %.10 =w copy %.9 + %.11 =w csgew %.10, 32 + %.12 =w cnew %.11, 0 +@logic_join.551 + %.13 =w phi @body.544 %.8, @logic_right.550 %.12 + %.14 =w cnew %.13, 0 + jnz %.14, @logic_join.549, @logic_right.548 +@logic_right.548 + %.15 =w loaduw %.2 + %.16 =w loadsw %.4 + %.17 =w copy 
%.16 + %.18 =w shr 4294967295, %.17 + %.19 =w cugtw %.15, %.18 + %.20 =w cnew %.19, 0 +@logic_join.549 + %.21 =w phi @logic_join.551 %.14, @logic_right.548 %.20 + %.22 =w cnew %.21, 0 + jnz %.22, @cond_true.545, @cond_false.546 +@cond_true.545 + %.23 =w loaduw %.2 + jmp @cond_join.547 +@cond_false.546 + %.24 =w loaduw %.2 + %.25 =w loadsw %.4 + %.26 =w copy %.25 + %.27 =w shl %.24, %.26 +@cond_join.547 + %.28 =w phi @cond_true.545 %.23, @cond_false.546 %.27 + ret %.28 +} +function w $safe_lshift_func_uint32_t_u_u(w %.1, w %.3) { +@start.552 + %.2 =l alloc4 4 + storew %.1, %.2 + %.4 =l alloc4 4 + storew %.3, %.4 +@body.553 + %.5 =w loaduw %.4 + %.6 =w copy %.5 + %.7 =w copy 32 + %.8 =w cugew %.6, %.7 + %.9 =w cnew %.8, 0 + jnz %.9, @logic_join.558, @logic_right.557 +@logic_right.557 + %.10 =w loaduw %.2 + %.11 =w loaduw %.4 + %.12 =w copy %.11 + %.13 =w shr 4294967295, %.12 + %.14 =w cugtw %.10, %.13 + %.15 =w cnew %.14, 0 +@logic_join.558 + %.16 =w phi @body.553 %.9, @logic_right.557 %.15 + %.17 =w cnew %.16, 0 + jnz %.17, @cond_true.554, @cond_false.555 +@cond_true.554 + %.18 =w loaduw %.2 + jmp @cond_join.556 +@cond_false.555 + %.19 =w loaduw %.2 + %.20 =w loaduw %.4 + %.21 =w copy %.20 + %.22 =w shl %.19, %.21 +@cond_join.556 + %.23 =w phi @cond_true.554 %.18, @cond_false.555 %.22 + ret %.23 +} +function w $safe_rshift_func_uint32_t_u_s(w %.1, w %.3) { +@start.559 + %.2 =l alloc4 4 + storew %.1, %.2 + %.4 =l alloc4 4 + storew %.3, %.4 +@body.560 + %.5 =w loadsw %.4 + %.6 =w copy %.5 + %.7 =w csltw %.6, 0 + %.8 =w cnew %.7, 0 + jnz %.8, @logic_join.565, @logic_right.564 +@logic_right.564 + %.9 =w loadsw %.4 + %.10 =w copy %.9 + %.11 =w csgew %.10, 32 + %.12 =w cnew %.11, 0 +@logic_join.565 + %.13 =w phi @body.560 %.8, @logic_right.564 %.12 + %.14 =w cnew %.13, 0 + jnz %.14, @cond_true.561, @cond_false.562 +@cond_true.561 + %.15 =w loaduw %.2 + jmp @cond_join.563 +@cond_false.562 + %.16 =w loaduw %.2 + %.17 =w loadsw %.4 + %.18 =w copy %.17 + %.19 =w shr %.16, 
%.18 +@cond_join.563 + %.20 =w phi @cond_true.561 %.15, @cond_false.562 %.19 + ret %.20 +} +function w $safe_rshift_func_uint32_t_u_u(w %.1, w %.3) { +@start.566 + %.2 =l alloc4 4 + storew %.1, %.2 + %.4 =l alloc4 4 + storew %.3, %.4 +@body.567 + %.5 =w loaduw %.4 + %.6 =w copy %.5 + %.7 =w copy 32 + %.8 =w cugew %.6, %.7 + %.9 =w cnew %.8, 0 + jnz %.9, @cond_true.568, @cond_false.569 +@cond_true.568 + %.10 =w loaduw %.2 + jmp @cond_join.570 +@cond_false.569 + %.11 =w loaduw %.2 + %.12 =w loaduw %.4 + %.13 =w copy %.12 + %.14 =w shr %.11, %.13 +@cond_join.570 + %.15 =w phi @cond_true.568 %.10, @cond_false.569 %.14 + ret %.15 +} +function l $safe_unary_minus_func_uint64_t_u(l %.1) { +@start.571 + %.2 =l alloc8 8 + storel %.1, %.2 +@body.572 + %.3 =l extsw 0 + %.4 =l loadl %.2 + %.5 =l sub %.3, %.4 + ret %.5 +} +function l $safe_add_func_uint64_t_u_u(l %.1, l %.3) { +@start.573 + %.2 =l alloc8 8 + storel %.1, %.2 + %.4 =l alloc8 8 + storel %.3, %.4 +@body.574 + %.5 =l loadl %.2 + %.6 =l loadl %.4 + %.7 =l add %.5, %.6 + ret %.7 +} +function l $safe_sub_func_uint64_t_u_u(l %.1, l %.3) { +@start.575 + %.2 =l alloc8 8 + storel %.1, %.2 + %.4 =l alloc8 8 + storel %.3, %.4 +@body.576 + %.5 =l loadl %.2 + %.6 =l loadl %.4 + %.7 =l sub %.5, %.6 + ret %.7 +} +function l $safe_mul_func_uint64_t_u_u(l %.1, l %.3) { +@start.577 + %.2 =l alloc8 8 + storel %.1, %.2 + %.4 =l alloc8 8 + storel %.3, %.4 +@body.578 + %.5 =l loadl %.2 + %.6 =l copy %.5 + %.7 =l loadl %.4 + %.8 =l copy %.7 + %.9 =l mul %.6, %.8 + ret %.9 +} +function l $safe_mod_func_uint64_t_u_u(l %.1, l %.3) { +@start.579 + %.2 =l alloc8 8 + storel %.1, %.2 + %.4 =l alloc8 8 + storel %.3, %.4 +@body.580 + %.5 =l loadl %.4 + %.6 =l extsw 0 + %.7 =w ceql %.5, %.6 + %.8 =w cnew %.7, 0 + jnz %.8, @cond_true.581, @cond_false.582 +@cond_true.581 + %.9 =l loadl %.2 + jmp @cond_join.583 +@cond_false.582 + %.10 =l loadl %.2 + %.11 =l loadl %.4 + %.12 =l urem %.10, %.11 +@cond_join.583 + %.13 =l phi @cond_true.581 %.9, 
@cond_false.582 %.12 + ret %.13 +} +function l $safe_div_func_uint64_t_u_u(l %.1, l %.3) { +@start.584 + %.2 =l alloc8 8 + storel %.1, %.2 + %.4 =l alloc8 8 + storel %.3, %.4 +@body.585 + %.5 =l loadl %.4 + %.6 =l extsw 0 + %.7 =w ceql %.5, %.6 + %.8 =w cnew %.7, 0 + jnz %.8, @cond_true.586, @cond_false.587 +@cond_true.586 + %.9 =l loadl %.2 + jmp @cond_join.588 +@cond_false.587 + %.10 =l loadl %.2 + %.11 =l loadl %.4 + %.12 =l udiv %.10, %.11 +@cond_join.588 + %.13 =l phi @cond_true.586 %.9, @cond_false.587 %.12 + ret %.13 +} +function l $safe_lshift_func_uint64_t_u_s(l %.1, w %.3) { +@start.589 + %.2 =l alloc8 8 + storel %.1, %.2 + %.4 =l alloc4 4 + storew %.3, %.4 +@body.590 + %.5 =w loadsw %.4 + %.6 =w copy %.5 + %.7 =w csltw %.6, 0 + %.8 =w cnew %.7, 0 + jnz %.8, @logic_join.597, @logic_right.596 +@logic_right.596 + %.9 =w loadsw %.4 + %.10 =w copy %.9 + %.11 =w csgew %.10, 32 + %.12 =w cnew %.11, 0 +@logic_join.597 + %.13 =w phi @body.590 %.8, @logic_right.596 %.12 + %.14 =w cnew %.13, 0 + jnz %.14, @logic_join.595, @logic_right.594 +@logic_right.594 + %.15 =l loadl %.2 + %.16 =w loadsw %.4 + %.17 =w copy %.16 + %.18 =l shr 18446744073709551615, %.17 + %.19 =w cugtl %.15, %.18 + %.20 =w cnew %.19, 0 +@logic_join.595 + %.21 =w phi @logic_join.597 %.14, @logic_right.594 %.20 + %.22 =w cnew %.21, 0 + jnz %.22, @cond_true.591, @cond_false.592 +@cond_true.591 + %.23 =l loadl %.2 + jmp @cond_join.593 +@cond_false.592 + %.24 =l loadl %.2 + %.25 =w loadsw %.4 + %.26 =w copy %.25 + %.27 =l shl %.24, %.26 +@cond_join.593 + %.28 =l phi @cond_true.591 %.23, @cond_false.592 %.27 + ret %.28 +} +function l $safe_lshift_func_uint64_t_u_u(l %.1, w %.3) { +@start.598 + %.2 =l alloc8 8 + storel %.1, %.2 + %.4 =l alloc4 4 + storew %.3, %.4 +@body.599 + %.5 =w loaduw %.4 + %.6 =w copy %.5 + %.7 =w copy 32 + %.8 =w cugew %.6, %.7 + %.9 =w cnew %.8, 0 + jnz %.9, @logic_join.604, @logic_right.603 +@logic_right.603 + %.10 =l loadl %.2 + %.11 =w loaduw %.4 + %.12 =w copy %.11 + %.13 
=l shr 18446744073709551615, %.12 + %.14 =w cugtl %.10, %.13 + %.15 =w cnew %.14, 0 +@logic_join.604 + %.16 =w phi @body.599 %.9, @logic_right.603 %.15 + %.17 =w cnew %.16, 0 + jnz %.17, @cond_true.600, @cond_false.601 +@cond_true.600 + %.18 =l loadl %.2 + jmp @cond_join.602 +@cond_false.601 + %.19 =l loadl %.2 + %.20 =w loaduw %.4 + %.21 =w copy %.20 + %.22 =l shl %.19, %.21 +@cond_join.602 + %.23 =l phi @cond_true.600 %.18, @cond_false.601 %.22 + ret %.23 +} +function l $safe_rshift_func_uint64_t_u_s(l %.1, w %.3) { +@start.605 + %.2 =l alloc8 8 + storel %.1, %.2 + %.4 =l alloc4 4 + storew %.3, %.4 +@body.606 + %.5 =w loadsw %.4 + %.6 =w copy %.5 + %.7 =w csltw %.6, 0 + %.8 =w cnew %.7, 0 + jnz %.8, @logic_join.611, @logic_right.610 +@logic_right.610 + %.9 =w loadsw %.4 + %.10 =w copy %.9 + %.11 =w csgew %.10, 32 + %.12 =w cnew %.11, 0 +@logic_join.611 + %.13 =w phi @body.606 %.8, @logic_right.610 %.12 + %.14 =w cnew %.13, 0 + jnz %.14, @cond_true.607, @cond_false.608 +@cond_true.607 + %.15 =l loadl %.2 + jmp @cond_join.609 +@cond_false.608 + %.16 =l loadl %.2 + %.17 =w loadsw %.4 + %.18 =w copy %.17 + %.19 =l shr %.16, %.18 +@cond_join.609 + %.20 =l phi @cond_true.607 %.15, @cond_false.608 %.19 + ret %.20 +} +function l $safe_rshift_func_uint64_t_u_u(l %.1, w %.3) { +@start.612 + %.2 =l alloc8 8 + storel %.1, %.2 + %.4 =l alloc4 4 + storew %.3, %.4 +@body.613 + %.5 =w loaduw %.4 + %.6 =w copy %.5 + %.7 =w copy 32 + %.8 =w cugew %.6, %.7 + %.9 =w cnew %.8, 0 + jnz %.9, @cond_true.614, @cond_false.615 +@cond_true.614 + %.10 =l loadl %.2 + jmp @cond_join.616 +@cond_false.615 + %.11 =l loadl %.2 + %.12 =w loaduw %.4 + %.13 =w copy %.12 + %.14 =l shr %.11, %.13 +@cond_join.616 + %.15 =l phi @cond_true.614 %.10, @cond_false.615 %.14 + ret %.15 +} +function s $safe_add_func_float_f_f(s %.1, s %.3) { +@start.617 + %.2 =l alloc4 4 + stores %.1, %.2 + %.4 =l alloc4 4 + stores %.3, %.4 +@body.618 + %.5 =s swtof 0 + %.6 =s mul s_0x1p-1, s_0x1.fffffe091ff3dp+127 + %.7 =w 
cgts %.5, %.6 + %.8 =w cnew %.7, 0 + jnz %.8, @cond_true.619, @cond_false.620 +@cond_true.619 + %.9 =s loads %.2 + jmp @cond_join.621 +@cond_false.620 + %.10 =s loads %.2 + %.11 =s loads %.4 + %.12 =s add %.10, %.11 +@cond_join.621 + %.13 =s phi @cond_true.619 %.9, @cond_false.620 %.12 + ret %.13 +} +function s $safe_sub_func_float_f_f(s %.1, s %.3) { +@start.622 + %.2 =l alloc4 4 + stores %.1, %.2 + %.4 =l alloc4 4 + stores %.3, %.4 +@body.623 + %.5 =s swtof 0 + %.6 =s mul s_0x1p-1, s_0x1.fffffe091ff3dp+127 + %.7 =w cgts %.5, %.6 + %.8 =w cnew %.7, 0 + jnz %.8, @cond_true.624, @cond_false.625 +@cond_true.624 + %.9 =s loads %.2 + jmp @cond_join.626 +@cond_false.625 + %.10 =s loads %.2 + %.11 =s loads %.4 + %.12 =s sub %.10, %.11 +@cond_join.626 + %.13 =s phi @cond_true.624 %.9, @cond_false.625 %.12 + ret %.13 +} +function s $safe_mul_func_float_f_f(s %.1, s %.3) { +@start.627 + %.2 =l alloc4 4 + stores %.1, %.2 + %.4 =l alloc4 4 + stores %.3, %.4 +@body.628 + %.5 =s swtof 0 + %.6 =s mul s_0x1p-28, s_0x1.fffffe091ff3dp+127 + %.7 =s mul s_0x1p-100, %.6 + %.8 =w cgts %.5, %.7 + %.9 =w cnew %.8, 0 + jnz %.9, @cond_true.629, @cond_false.630 +@cond_true.629 + %.10 =s loads %.2 + jmp @cond_join.631 +@cond_false.630 + %.11 =s loads %.2 + %.12 =s loads %.4 + %.13 =s mul %.11, %.12 +@cond_join.631 + %.14 =s phi @cond_true.629 %.10, @cond_false.630 %.13 + ret %.14 +} +function s $safe_div_func_float_f_f(s %.1, s %.3) { +@start.632 + %.2 =l alloc4 4 + stores %.1, %.2 + %.4 =l alloc4 4 + stores %.3, %.4 +@body.633 + %.5 =s swtof 0 + %.6 =w clts %.5, s_0x1p+0 + %.7 =w cnew %.6, 0 + jnz %.7, @logic_right.637, @logic_join.638 +@logic_right.637 + %.8 =s loads %.4 + %.9 =w ceqs %.8, s_0x0p+0 + %.10 =w cnew %.9, 0 + jnz %.10, @logic_join.640, @logic_right.639 +@logic_right.639 + %.11 =s swtof 0 + %.12 =s mul s_0x1p-49, s_0x1.fffffe091ff3dp+127 + %.13 =s mul s_0x1p-100, %.12 + %.14 =w cgts %.11, %.13 + %.15 =w cnew %.14, 0 +@logic_join.640 + %.16 =w phi @logic_right.637 %.10, 
@logic_right.639 %.15 + %.17 =w cnew %.16, 0 +@logic_join.638 + %.18 =w phi @body.633 %.7, @logic_join.640 %.17 + %.19 =w cnew %.18, 0 + jnz %.19, @cond_true.634, @cond_false.635 +@cond_true.634 + %.20 =s loads %.2 + jmp @cond_join.636 +@cond_false.635 + %.21 =s loads %.2 + %.22 =s loads %.4 + %.23 =s div %.21, %.22 +@cond_join.636 + %.24 =s phi @cond_true.634 %.20, @cond_false.635 %.23 + ret %.24 +} +function d $safe_add_func_double_f_f(d %.1, d %.3) { +@start.641 + %.2 =l alloc8 8 + stored %.1, %.2 + %.4 =l alloc8 8 + stored %.3, %.4 +@body.642 + %.5 =d swtof 0 + %.6 =d mul d_0x1p-1, d_0x1.fffffffffffffp+1023 + %.7 =w cgtd %.5, %.6 + %.8 =w cnew %.7, 0 + jnz %.8, @cond_true.643, @cond_false.644 +@cond_true.643 + %.9 =d loadd %.2 + jmp @cond_join.645 +@cond_false.644 + %.10 =d loadd %.2 + %.11 =d loadd %.4 + %.12 =d add %.10, %.11 +@cond_join.645 + %.13 =d phi @cond_true.643 %.9, @cond_false.644 %.12 + ret %.13 +} +function d $safe_sub_func_double_f_f(d %.1, d %.3) { +@start.646 + %.2 =l alloc8 8 + stored %.1, %.2 + %.4 =l alloc8 8 + stored %.3, %.4 +@body.647 + %.5 =d swtof 0 + %.6 =d mul d_0x1p-1, d_0x1.fffffffffffffp+1023 + %.7 =w cgtd %.5, %.6 + %.8 =w cnew %.7, 0 + jnz %.8, @cond_true.648, @cond_false.649 +@cond_true.648 + %.9 =d loadd %.2 + jmp @cond_join.650 +@cond_false.649 + %.10 =d loadd %.2 + %.11 =d loadd %.4 + %.12 =d sub %.10, %.11 +@cond_join.650 + %.13 =d phi @cond_true.648 %.9, @cond_false.649 %.12 + ret %.13 +} +function d $safe_mul_func_double_f_f(d %.1, d %.3) { +@start.651 + %.2 =l alloc8 8 + stored %.1, %.2 + %.4 =l alloc8 8 + stored %.3, %.4 +@body.652 + %.5 =d swtof 0 + %.6 =d mul d_0x1p-924, d_0x1.fffffffffffffp+1023 + %.7 =d mul d_0x1p-100, %.6 + %.8 =w cgtd %.5, %.7 + %.9 =w cnew %.8, 0 + jnz %.9, @cond_true.653, @cond_false.654 +@cond_true.653 + %.10 =d loadd %.2 + jmp @cond_join.655 +@cond_false.654 + %.11 =d loadd %.2 + %.12 =d loadd %.4 + %.13 =d mul %.11, %.12 +@cond_join.655 + %.14 =d phi @cond_true.653 %.10, @cond_false.654 %.13 + 
ret %.14 +} +function d $safe_div_func_double_f_f(d %.1, d %.3) { +@start.656 + %.2 =l alloc8 8 + stored %.1, %.2 + %.4 =l alloc8 8 + stored %.3, %.4 +@body.657 + %.5 =d swtof 0 + %.6 =w cltd %.5, d_0x1p+0 + %.7 =w cnew %.6, 0 + jnz %.7, @logic_right.661, @logic_join.662 +@logic_right.661 + %.8 =d loadd %.4 + %.9 =w ceqd %.8, d_0x0p+0 + %.10 =w cnew %.9, 0 + jnz %.10, @logic_join.664, @logic_right.663 +@logic_right.663 + %.11 =d swtof 0 + %.12 =d mul d_0x1p-974, d_0x1.fffffffffffffp+1023 + %.13 =d mul d_0x1p-100, %.12 + %.14 =w cgtd %.11, %.13 + %.15 =w cnew %.14, 0 +@logic_join.664 + %.16 =w phi @logic_right.661 %.10, @logic_right.663 %.15 + %.17 =w cnew %.16, 0 +@logic_join.662 + %.18 =w phi @body.657 %.7, @logic_join.664 %.17 + %.19 =w cnew %.18, 0 + jnz %.19, @cond_true.658, @cond_false.659 +@cond_true.658 + %.20 =d loadd %.2 + jmp @cond_join.660 +@cond_false.659 + %.21 =d loadd %.2 + %.22 =d loadd %.4 + %.23 =d div %.21, %.22 +@cond_join.660 + %.24 =d phi @cond_true.658 %.20, @cond_false.659 %.23 + ret %.24 +} +function w $safe_convert_func_float_to_int32_t(s %.1) { +@start.665 + %.2 =l alloc4 4 + stores %.1, %.2 +@body.666 + %.3 =s loads %.2 + %.4 =w sub 0, 2147483647 + %.5 =w sub %.4, 1 + %.6 =s swtof %.5 + %.7 =w cles %.3, %.6 + %.8 =w cnew %.7, 0 + jnz %.8, @logic_join.671, @logic_right.670 +@logic_right.670 + %.9 =s loads %.2 + %.10 =s swtof 2147483647 + %.11 =w cges %.9, %.10 + %.12 =w cnew %.11, 0 +@logic_join.671 + %.13 =w phi @body.666 %.8, @logic_right.670 %.12 + %.14 =w cnew %.13, 0 + jnz %.14, @cond_true.667, @cond_false.668 +@cond_true.667 + jmp @cond_join.669 +@cond_false.668 + %.15 =s loads %.2 + %.16 =w stosi %.15 +@cond_join.669 + %.17 =w phi @cond_true.667 2147483647, @cond_false.668 %.16 + ret %.17 +} +function $platform_main_begin() { +@start.672 +@body.673 + ret +} +function $crc32_gentab() { +@start.674 +@body.675 + ret +} +data $.Lstring.93 = align 1 { b "%s %d\012", z 1, } +function $transparent_crc(l %.1, l %.3, w %.5) { +@start.676 + 
%.2 =l alloc8 8 + storel %.1, %.2 + %.4 =l alloc8 8 + storel %.3, %.4 + %.6 =l alloc4 4 + storew %.5, %.6 +@body.677 + %.7 =w loadsw %.6 + %.8 =w cnew %.7, 0 + jnz %.8, @if_true.678, @if_false.679 +@if_true.678 + %.9 =l copy $.Lstring.93 + %.10 =l loadl %.4 + %.11 =l loadl %.2 + %.12 =w call $printf(l %.9, ..., l %.10, l %.11) +@if_false.679 + %.13 =l loadl $crc32_context + %.14 =l loadl %.2 + %.15 =l add %.13, %.14 + storel %.15, $crc32_context + ret +} +data $.Lstring.95 = align 1 { b "...checksum after hashing %s : %lX\012", z 1, } +function $transparent_crc_bytes(l %.1, w %.3, l %.5, w %.7) { +@start.680 + %.2 =l alloc8 8 + storel %.1, %.2 + %.4 =l alloc4 4 + storew %.3, %.4 + %.6 =l alloc8 8 + storel %.5, %.6 + %.8 =l alloc4 4 + storew %.7, %.8 + %.9 =l alloc4 4 +@body.681 + storew 0, %.9 +@for_cond.682 + %.10 =w loadsw %.9 + %.11 =w loadsw %.4 + %.12 =w csltw %.10, %.11 + jnz %.12, @for_body.683, @for_join.685 +@for_body.683 + %.13 =l loadl $crc32_context + %.14 =l loadl %.2 + %.15 =w loadsw %.9 + %.16 =l extsw %.15 + %.17 =l mul %.16, 1 + %.18 =l add %.14, %.17 + %.19 =w loadsb %.18 + %.20 =l extsb %.19 + %.21 =l add %.13, %.20 + storel %.21, $crc32_context +@for_cont.684 + %.22 =w loadsw %.9 + %.23 =w add %.22, 1 + storew %.23, %.9 + jmp @for_cond.682 +@for_join.685 + %.24 =w loadsw %.8 + %.25 =w cnew %.24, 0 + jnz %.25, @if_true.686, @if_false.687 +@if_true.686 + %.26 =l copy $.Lstring.95 + %.27 =l loadl %.6 + %.28 =l loadl $crc32_context + %.29 =l copy 4294967295 + %.30 =l xor %.28, %.29 + %.31 =w call $printf(l %.26, ..., l %.27, l %.30) +@if_false.687 + ret +} +data $.Lstring.97 = align 1 { b "checksum = %llx\012", z 1, } +function $platform_main_end(l %.1, w %.3) { +@start.688 + %.2 =l alloc8 8 + storel %.1, %.2 + %.4 =l alloc4 4 + storew %.3, %.4 +@body.689 + %.5 =w loadsw %.4 + %.6 =w ceqw %.5, 0 + %.7 =w cnew %.6, 0 + jnz %.7, @if_true.690, @if_false.691 +@if_true.690 + %.8 =l copy $.Lstring.97 + %.9 =l loadl %.2 + %.10 =w call $printf(l %.8, ..., l 
%.9) +@if_false.691 + ret +} +data $g_2 = align 1 { b 215, } +data $g_13 = align 4 { w 18446744073709551612, w 3113531208, w 447237310, w 657824592, w 447237310, w 3113531208, w 18446744073709551612, w 0, w 3145062956, w 1458304211, w 1458304211, w 3145062956, w 0, w 18446744073709551612, w 3113531208, w 447237310, w 657824592, w 447237310, w 3113531208, w 18446744073709551612, w 0, w 3145062956, w 1458304211, w 1458304211, w 3145062956, w 0, w 18446744073709551612, } +data $g_24 = align 4 { w 18446744073709551613, } +data $g_23 = align 8 { l $g_24, } +data $g_38 = align 8 { l 0, } +data $g_46 = align 1 { b 0, } +data $g_50 = align 4 { w 1, } +data $g_57 = align 1 { b 224, } +data $g_58 = align 8 { l 8, } +data $g_80 = align 8 { l 1104779632179292239, } +data $g_81 = align 2 { h 18446744073709551615, } +data $g_82 = align 8 { l 17444925578407733218, } +data $g_84 = align 4 { w 1166649911, } +data $g_88 = align 8 { l $g_38, } +data $g_115 = align 4 { w 18446744073709551613, } +data $g_130 = align 4 { w 18446744073709551607, w 0, h 5458, z 2, w 0, w 397636938, } +data $g_132 = align 1 { b 65, b 65, b 65, b 65, b 65, b 65, } +data $g_173 = align 8 { l $g_130 + 0, } +data $g_172 = align 8 { l $g_173, l $g_173, } +data $g_185 = align 8 { b 1, z 7, l 1, w 4071577471, z 4, l 1, w 3048012705, w 1, w 1, w 1, w 18446744073709551615, z 4 } +data $g_201 = align 8 { l $g_185, } +data $g_265 = align 8 { b 0, z 7, l 9646574861175543734, w 3690576639, z 4, l 18446744073709551615, w 4294967292, w 2450216573, w 18446744073709551612, w 1636002719, w 1518760778, z 4 } +data $g_296 = align 8 { l $g_201, } +data $g_364 = align 8 { l $g_185 + 0, l $g_185 + 0, l $g_185 + 0, l $g_185 + 0, l $g_185 + 0, l $g_185 + 0, l $g_185 + 0, l $g_185 + 0, } +data $g_363 = align 8 { l $g_364 + 48, } +data $g_394 = align 8 { l 0, } +data $g_399 = align 8 { l 1, } +data $g_422 = align 8 { l $g_265 + 32, } +data $g_425 = align 2 { h 22013, } +data $g_477 = align 8 { l 5223132716906150842, } +data $g_518 = 
align 8 { b 255, z 7, l 17526030672371278218, w 1, z 4, l 13446109256110216392, w 2, w 18446744073709551610, w 0, w 8, w 1038833289, z 4 } +data $g_566 = align 1 { b 9, } +data $g_619 = align 2 { h 65535, } +data $g_629 = align 1 { b 1, } +data $g_631 = align 1 { b 70, } +data $g_634 = align 8 { l 0, } +data $g_662 = align 8 { l 0, } +data $g_776 = align 8 { l 0, } +data $g_775 = align 8 { l $g_776, l $g_776, l $g_776, l $g_776, l $g_776, } +data $g_794 = align 4 { w 1906903063, w 6, h 0, z 2, w 2863962639, w 3301065942, } +data $g_850 = align 8 { l 0, l 0, } +data $g_858 = align 2 { h 7, } +data $g_937 = align 1 { b 4, } +data $g_1018 = align 4 { w 0, } +data $g_1038 = align 8 { l $g_422, } +data $g_1037 = align 8 { l $g_1038, } +data $g_1070 = align 8 { l $g_662, } +data $g_1069 = align 8 { l $g_1070, } +data $g_1123 = align 8 { l $g_794, } +data $g_1130 = align 1 { b 250, } +data $g_1183 = align 8 { b 254, z 7, l 5, w 2966657800, z 4, l 3, w 1, w 1125518946, w 1, w 18446744073709551615, w 18446744073709551615, z 4 } +data $g_1269 = align 8 { l $g_296, } +data $g_1298 = align 4 { w 489894291, } +data $g_1313 = align 8 { l $g_1037, } +data $g_1393 = align 4 { w 4294967294, } +data $g_1476 = align 8 { l 0, } +data $g_1590 = align 8 { l $g_619, } +data $g_1589 = align 8 { l $g_1590, } +data $g_1604 = align 8 { l 18446744073709551615, } +data $g_1616 = align 8 { l $g_1476, l $g_1476, l $g_1476, l $g_1476, l $g_1476, l $g_1476, l $g_1476, l $g_1476, l $g_1476, l $g_1476, l $g_1476, l $g_1476, l $g_1476, l $g_1476, l $g_1476, l $g_1476, l $g_1476, l $g_1476, l $g_1476, l $g_1476, l $g_1476, l $g_1476, l $g_1476, l $g_1476, l $g_1476, l $g_1476, l $g_1476, l $g_1476, l $g_1476, l $g_1476, l $g_1476, l $g_1476, l $g_1476, l $g_1476, l $g_1476, l $g_1476, } +data $g_1615 = align 8 { l 0, l 0, l 0, l 0, l 0, } +data $g_1617 = align 2 { h 65528, } +data $g_1645 = align 4 { w 218946655, } +data $g_1706 = align 8 { l 0, } +data $g_1705 = align 8 { l $g_1706, } +data $g_1752 = 
align 8 { l 0, } +data $g_1922 = align 2 { h 18773, } +data $g_1972 = align 8 { l 1, } +data $g_1984 = align 8 { l $g_1269, } +data $g_1983 = align 8 { l $g_1984, } +data $g_2013 = align 4 { w 2920810850, } +data $g_2028 = align 8 { l 0, } +data $g_2102 = align 2 { h 0, } +data $g_2127 = align 8 { l $g_394, } +function l $func_1() { +@start.692 + %.1 =l alloc8 8 + %.7 =l alloc8 8 + %.9 =l alloc8 8 + %.11 =l alloc8 8 + %.13 =l alloc8 8 + %.15 =l alloc4 40 + %.50 =l alloc4 8 + %.51 =l alloc4 4 + %.52 =l alloc4 4 + %.55 =l alloc8 8 + %.58 =l alloc4 2 + %.61 =l alloc4 4 + %.64 =l alloc8 8 + %.68 =l alloc8 8 + %.70 =l alloc4 8 + %.71 =l alloc8 64 + %.80 =l alloc4 4 + %.83 =l alloc8 8 + %.85 =l alloc4 4 + %.88 =l alloc4 20 + %.100 =l alloc8 8 + %.104 =l alloc8 8 + %.106 =l alloc4 2 + %.109 =l alloc4 1 + %.112 =l alloc4 4 + %.113 =l alloc4 4 + %.154 =l alloc4 24 + %.167 =l alloc8 8 + %.173 =l alloc8 8 + %.175 =l alloc4 4 + %.178 =l alloc4 4 + %.181 =l alloc4 4 + %.184 =l alloc4 4 + %.185 =l alloc8 240 + %.241 =l alloc4 4 + %.244 =l alloc4 32 + %.261 =l alloc4 2 + %.264 =l alloc8 8 + %.266 =l alloc8 8 + %.272 =l alloc8 8 + %.278 =l alloc8 8 + %.289 =l alloc8 8 + %.295 =l alloc8 8 + %.306 =l alloc8 8 + %.308 =l alloc8 56 + %.309 =l alloc4 8 + %.310 =l alloc4 4 + %.313 =l alloc8 8 + %.317 =l alloc8 8 + %.323 =l alloc8 8 + %.325 =l alloc8 3136 + %.1642 =l alloc4 2 + %.1645 =l alloc4 2 + %.1648 =l alloc8 1600 + %.2010 =l alloc4 4 + %.2013 =l alloc4 4 + %.2014 =l alloc4 4 + %.2015 =l alloc4 4 + %.2050 =l alloc8 40 + %.2051 =l alloc8 8 + %.2053 =l alloc8 8 + %.2059 =l alloc8 8 + %.2062 =l alloc4 2 + %.2067 =l alloc8 64 + %.2076 =l alloc4 216 + %.2185 =l alloc4 4 + %.2186 =l alloc4 4 + %.2219 =l alloc4 140 + %.2360 =l alloc4 4 + %.2363 =l alloc4 4 + %.2366 =l alloc4 4 + %.2371 =l alloc4 192 + %.2468 =l alloc4 4 + %.2469 =l alloc4 4 + %.2470 =l alloc4 4 + %.2475 =l alloc8 1728 + %.3142 =l alloc4 4 + %.3145 =l alloc4 24 + %.3194 =l alloc4 4 + %.3197 =l alloc4 4 + %.3200 =l alloc4 4 
+ %.3201 =l alloc4 4 + %.3202 =l alloc4 4 + %.3331 =l alloc4 4 + %.3334 =l alloc8 8 + %.3336 =l alloc4 4 + %.3341 =l alloc4 4 + %.3344 =l alloc4 4 + %.3452 =l alloc4 2 + %.3455 =l alloc8 8 + %.3457 =l alloc8 64 + %.3474 =l alloc4 28 + %.3475 =l alloc4 4 + %.3480 =l alloc4 4 + %.3483 =l alloc4 4 + %.3514 =l alloc8 128 + %.3544 =l alloc4 1 + %.3547 =l alloc4 4 + %.3550 =l alloc8 8 + %.3552 =l alloc8 8 + %.3558 =l alloc8 8 + %.3560 =l alloc4 2 + %.3565 =l alloc4 2 + %.3568 =l alloc4 4 + %.3569 =l alloc4 4 + %.3691 =l alloc4 1 + %.3694 =l alloc4 2 + %.3767 =l alloc4 16 + %.3768 =l alloc8 8 + %.3772 =l alloc8 40 + %.3778 =l alloc8 8 + %.3784 =l alloc4 4 + %.3785 =l alloc4 4 + %.3921 =l alloc8 8 + %.3923 =l alloc8 8 + %.3925 =l alloc8 8 + %.3931 =l alloc4 4 + %.3934 =l alloc4 1 + %.3937 =l alloc4 4 + %.4061 =l alloc4 12 + %.4062 =l alloc8 8 + %.4068 =l alloc8 8 + %.4070 =l alloc8 8 + %.4072 =l alloc4 4 + %.4160 =l alloc8 8 + %.4179 =l alloc8 8 + %.4183 =l alloc8 8 + %.4186 =l alloc4 4 + %.4191 =l alloc4 4 + %.4194 =l alloc4 4 + %.4197 =l alloc4 4 + %.4202 =l alloc4 4 + %.4205 =l alloc4 32 + %.4228 =l alloc4 4 + %.4229 =l alloc4 4 + %.4325 =l alloc4 4 + %.4398 =l alloc4 2 + %.4401 =l alloc4 120 + %.4480 =l alloc8 8 + %.4492 =l alloc4 4 + %.4495 =l alloc4 4 + %.4498 =l alloc4 4 + %.4511 =l alloc8 8 + %.4512 =l alloc8 56 + %.4540 =l alloc8 8 + %.4544 =l alloc4 4 + %.4547 =l alloc8 8 + %.4553 =l alloc4 4 + %.4556 =l alloc8 8 + %.4558 =l alloc4 4 + %.4561 =l alloc4 4 + %.4585 =l alloc4 1 + %.4588 =l alloc4 4 + %.4591 =l alloc8 8 + %.4596 =l alloc8 8 + %.4610 =l alloc8 8 + %.4614 =l alloc4 4 + %.4617 =l alloc4 20 + %.4629 =l alloc4 1 + %.4769 =l alloc4 2 + %.4772 =l alloc8 24 + %.4773 =l alloc8 8 + %.4778 =l alloc8 8 + %.4782 =l alloc8 8 + %.4784 =l alloc8 8 + %.4789 =l alloc4 4 + %.4965 =l alloc8 8 + %.4967 =l alloc8 8 + %.4969 =l alloc8 8 + %.5136 =l alloc4 1 + %.5176 =l alloc4 4 + %.5179 =l alloc4 4 + %.5182 =l alloc4 4 + %.5187 =l alloc4 1 + %.5190 =l alloc8 8 + %.5192 =l 
alloc4 8 + %.5193 =l alloc8 8 + %.5196 =l alloc4 2 + %.5199 =l alloc8 8 + %.5202 =l alloc4 20 + %.5203 =l alloc4 4 + %.5226 =l alloc8 8 + %.5232 =l alloc8 64 + %.5265 =l alloc8 56 + %.5295 =l alloc8 8 + %.5297 =l alloc8 8 + %.5301 =l alloc8 8 + %.5307 =l alloc8 8 + %.5313 =l alloc8 8 + %.5319 =l alloc4 4 + %.5322 =l alloc4 1 + %.5325 =l alloc4 4 + %.5328 =l alloc4 4 + %.5412 =l alloc4 4 + %.5415 =l alloc8 8 + %.5419 =l alloc8 8 + %.5423 =l alloc8 960 + %.5845 =l alloc4 16 + %.5846 =l alloc4 4 + %.5847 =l alloc4 4 + %.5848 =l alloc4 4 + %.5938 =l alloc4 2 + %.5941 =l alloc4 4 + %.5944 =l alloc4 1 + %.5947 =l alloc8 8 + %.5949 =l alloc4 4 + %.5952 =l alloc4 4 + %.5955 =l alloc4 20 + %.5979 =l alloc4 4 + %.5982 =l alloc4 4 + %.5985 =l alloc8 64 + %.6010 =l alloc8 8 + %.6015 =l alloc4 4 + %.6267 =l alloc4 20 + %.6279 =l alloc8 8 + %.6285 =l alloc4 4 + %.6412 =l alloc8 8 + %.6416 =l alloc8 8 + %.6422 =l alloc8 8 + %.6428 =l alloc4 4 + %.6663 =l alloc4 60 + %.6703 =l alloc8 64 + %.6728 =l alloc4 4 + %.6758 =l alloc8 72 + %.6804 =l alloc8 8 + %.6806 =l alloc4 4 + %.6850 =l alloc4 2 + %.6853 =l alloc8 8 + %.6855 =l alloc8 8 + %.6861 =l alloc4 4 + %.6866 =l alloc4 4 + %.6871 =l alloc4 24 + %.6884 =l alloc4 4 + %.7004 =l alloc4 1008 + %.7509 =l alloc4 4 + %.7510 =l alloc4 4 + %.7511 =l alloc4 4 + %.7516 =l alloc4 4 + %.7519 =l alloc4 4 + %.7522 =l alloc8 8 + %.7528 =l alloc8 8 + %.7534 =l alloc8 8 + %.7540 =l alloc8 8 + %.7546 =l alloc8 8 + %.7552 =l alloc8 8 + %.7558 =l alloc8 8 + %.7562 =l alloc8 8 + %.7568 =l alloc8 8 + %.7574 =l alloc8 72 + %.7602 =l alloc4 4 +@body.693 + %.2 =l add %.1, 0 + %.3 =l copy $g_185 + %.4 =l mul 48, 1 + %.5 =l add %.3, %.4 + %.6 =l copy %.5 + storel %.6, %.2 + %.8 =l add %.7, 0 + storel $g_88, %.8 + %.10 =l add %.9, 0 + storel %.7, %.10 + %.12 =l add %.11, 0 + storel %.7, %.12 + %.14 =l add %.13, 0 + storel $g_88, %.14 + %.16 =l add %.15, 0 + %.17 =l extsw 0 + %.18 =l sub %.17, 1 + %.19 =w copy %.18 + storew %.19, %.16 + %.20 =l add %.15, 4 + 
%.21 =l extsw 0 + %.22 =l sub %.21, 1 + %.23 =w copy %.22 + storew %.23, %.20 + %.24 =l add %.15, 8 + %.25 =w copy 0 + storew %.25, %.24 + %.26 =l add %.15, 12 + %.27 =l extsw 0 + %.28 =l sub %.27, 1 + %.29 =w copy %.28 + storew %.29, %.26 + %.30 =l add %.15, 16 + %.31 =l extsw 0 + %.32 =l sub %.31, 1 + %.33 =w copy %.32 + storew %.33, %.30 + %.34 =l add %.15, 20 + %.35 =w copy 0 + storew %.35, %.34 + %.36 =l add %.15, 24 + %.37 =l extsw 0 + %.38 =l sub %.37, 1 + %.39 =w copy %.38 + storew %.39, %.36 + %.40 =l add %.15, 28 + %.41 =l extsw 0 + %.42 =l sub %.41, 1 + %.43 =w copy %.42 + storew %.43, %.40 + %.44 =l add %.15, 32 + %.45 =w copy 0 + storew %.45, %.44 + %.46 =l add %.15, 36 + %.47 =l extsw 0 + %.48 =l sub %.47, 1 + %.49 =w copy %.48 + storew %.49, %.46 + %.53 =l add %.52, 0 + %.54 =w copy 1876554256 + storew %.54, %.53 + %.56 =l add %.55, 0 + %.57 =l copy 388595597875467280 + storel %.57, %.56 + %.59 =l add %.58, 0 + %.60 =w copy 15327 + storeh %.60, %.59 + %.62 =l add %.61, 0 + %.63 =w copy 7 + storew %.63, %.62 + %.65 =l add %.64, 0 + %.66 =l extsw 0 + %.67 =l copy %.66 + storel %.67, %.65 + %.69 =l add %.68, 0 + storel %.64, %.69 + %.72 =l add %.71, 0 + storel $g_1038, %.72 + %.73 =l add %.71, 8 + storel $g_1038, %.73 + %.74 =l add %.71, 16 + storel $g_1038, %.74 + %.75 =l add %.71, 24 + storel $g_1038, %.75 + %.76 =l add %.71, 32 + storel $g_1038, %.76 + %.77 =l add %.71, 40 + storel $g_1038, %.77 + %.78 =l add %.71, 48 + storel $g_1038, %.78 + %.79 =l add %.71, 56 + storel $g_1038, %.79 + %.81 =l add %.80, 0 + %.82 =w copy 3267697444 + storew %.82, %.81 + %.84 =l add %.83, 0 + storel $g_1070, %.84 + %.86 =l add %.85, 0 + %.87 =w copy 1 + storew %.87, %.86 + %.89 =l add %.88, 0 + %.90 =w copy 0 + storew %.90, %.89 + %.91 =l add %.88, 4 + %.92 =w copy 9 + storew %.92, %.91 + %.93 =l add %.88, 8 + %.94 =w copy 64920 + storeh %.94, %.93 + %.95 =l add %.88, 10 + storeh 0, %.95 + %.96 =l add %.88, 12 + %.97 =w copy 9 + storew %.97, %.96 + %.98 =l add %.88, 
16 + %.99 =w copy 18446744073709551615 + storew %.99, %.98 + %.101 =l add %.100, 0 + %.102 =l extsw 0 + %.103 =l copy %.102 + storel %.103, %.101 + %.105 =l add %.104, 0 + storel %.100, %.105 + %.107 =l add %.106, 0 + %.108 =w copy 8 + storeh %.108, %.107 + %.110 =l add %.109, 0 + %.111 =w copy 255 + storeb %.111, %.110 + storew 0, %.112 +@for_cond.694 + %.114 =w loadsw %.112 + %.115 =w csltw %.114, 4 + jnz %.115, @for_body.695, @for_join.697 +@for_body.695 + %.116 =w copy 48300 + %.117 =w loadsw %.112 + %.118 =l extsw %.117 + %.119 =l mul %.118, 2 + %.120 =l add %.50, %.119 + storeh %.116, %.120 +@for_cont.696 + %.121 =w loadsw %.112 + %.122 =w add %.121, 1 + storew %.122, %.112 + jmp @for_cond.694 +@for_join.697 + storew 0, %.112 +@for_cond.698 + %.123 =w loadsw %.112 + %.124 =w csltw %.123, 2 + jnz %.124, @for_body.699, @for_join.701 +@for_body.699 + %.125 =w copy 45763 + %.126 =w loadsw %.112 + %.127 =l extsw %.126 + %.128 =l mul %.127, 2 + %.129 =l add %.51, %.128 + storeh %.125, %.129 +@for_cont.700 + %.130 =w loadsw %.112 + %.131 =w add %.130, 1 + storew %.131, %.112 + jmp @for_cond.698 +@for_join.701 + storew 0, %.112 +@for_cond.702 + %.132 =w loadsw %.112 + %.133 =w csltw %.132, 1 + jnz %.133, @for_body.703, @for_join.705 +@for_body.703 + storew 0, %.113 +@for_cond.706 + %.134 =w loadsw %.113 + %.135 =w csltw %.134, 4 + jnz %.135, @for_body.707, @for_join.709 +@for_body.707 + %.136 =l extsw 0 + %.137 =l sub %.136, 8 + %.138 =w copy %.137 + %.139 =w loadsw %.112 + %.140 =l extsw %.139 + %.141 =l mul %.140, 8 + %.142 =l add %.70, %.141 + %.143 =w loadsw %.113 + %.144 =l extsw %.143 + %.145 =l mul %.144, 2 + %.146 =l add %.142, %.145 + storeh %.138, %.146 +@for_cont.708 + %.147 =w loadsw %.113 + %.148 =w add %.147, 1 + storew %.148, %.113 + jmp @for_cond.706 +@for_join.709 +@for_cont.704 + %.149 =w loadsw %.112 + %.150 =w add %.149, 1 + storew %.150, %.112 + jmp @for_cond.702 +@for_join.705 + %.151 =w loadsb $g_2 + %.152 =w extsb %.151 + %.153 =w cnew %.152, 
0 + jnz %.153, @if_true.710, @if_false.711 +@if_true.710 + %.155 =l add %.154, 0 + %.156 =w copy 448696097 + storew %.156, %.155 + %.157 =l add %.154, 4 + %.158 =w copy 448696097 + storew %.158, %.157 + %.159 =l add %.154, 8 + %.160 =w copy 3159920155 + storew %.160, %.159 + %.161 =l add %.154, 12 + %.162 =w copy 448696097 + storew %.162, %.161 + %.163 =l add %.154, 16 + %.164 =w copy 448696097 + storew %.164, %.163 + %.165 =l add %.154, 20 + %.166 =w copy 3159920155 + storew %.166, %.165 + %.168 =l add %.167, 0 + %.169 =l copy $g_265 + %.170 =l mul 44, 1 + %.171 =l add %.169, %.170 + %.172 =l copy %.171 + storel %.172, %.168 + %.174 =l add %.173, 0 + storel %.7, %.174 + %.176 =l add %.175, 0 + %.177 =w copy 3065563876 + storew %.177, %.176 + %.179 =l add %.178, 0 + %.180 =w copy 3026640288 + storew %.180, %.179 + %.182 =l add %.181, 0 + %.183 =w copy 3133052029 + storew %.183, %.182 + %.186 =l add %.185, 0 + %.187 =l copy 5 + storel %.187, %.186 + %.188 =l add %.185, 8 + %.189 =l copy 2140593435845799635 + storel %.189, %.188 + %.190 =l add %.185, 16 + storel 14997647914956660667, %.190 + %.191 =l add %.185, 24 + %.192 =l copy 1 + storel %.192, %.191 + %.193 =l add %.185, 32 + %.194 =l copy 1 + storel %.194, %.193 + %.195 =l add %.185, 40 + storel 14997647914956660667, %.195 + %.196 =l add %.185, 48 + %.197 =l copy 2140593435845799635 + storel %.197, %.196 + %.198 =l add %.185, 56 + %.199 =l copy 5 + storel %.199, %.198 + %.200 =l add %.185, 64 + %.201 =l copy 2140593435845799635 + storel %.201, %.200 + %.202 =l add %.185, 72 + storel 14997647914956660667, %.202 + %.203 =l add %.185, 80 + %.204 =l copy 18446744073709551609 + storel %.204, %.203 + %.205 =l add %.185, 88 + %.206 =l copy 18446744073709551615 + storel %.206, %.205 + %.207 =l add %.185, 96 + %.208 =l copy 1 + storel %.208, %.207 + %.209 =l add %.185, 104 + %.210 =l copy 18446744073709551615 + storel %.210, %.209 + %.211 =l add %.185, 112 + %.212 =l copy 18446744073709551609 + storel %.212, %.211 + 
%.213 =l add %.185, 120 + storel 14997647914956660667, %.213 + %.214 =l add %.185, 128 + storel 14997647914956660667, %.214 + %.215 =l add %.185, 136 + %.216 =l copy 18446744073709551609 + storel %.216, %.215 + %.217 =l add %.185, 144 + %.218 =l copy 18446744073709551615 + storel %.218, %.217 + %.219 =l add %.185, 152 + %.220 =l copy 1 + storel %.220, %.219 + %.221 =l add %.185, 160 + %.222 =l copy 5 + storel %.222, %.221 + %.223 =l add %.185, 168 + %.224 =l copy 5 + storel %.224, %.223 + %.225 =l add %.185, 176 + %.226 =l copy 1 + storel %.226, %.225 + %.227 =l add %.185, 184 + %.228 =l copy 18446744073709551609 + storel %.228, %.227 + %.229 =l add %.185, 192 + %.230 =l copy 8317808307966024155 + storel %.230, %.229 + %.231 =l add %.185, 200 + %.232 =l copy 18446744073709551609 + storel %.232, %.231 + %.233 =l add %.185, 208 + %.234 =l copy 1 + storel %.234, %.233 + %.235 =l add %.185, 216 + %.236 =l copy 5 + storel %.236, %.235 + %.237 =l add %.185, 224 + %.238 =l copy 5 + storel %.238, %.237 + %.239 =l add %.185, 232 + %.240 =l copy 1 + storel %.240, %.239 + %.242 =l add %.241, 0 + %.243 =w copy 0 + storew %.243, %.242 + %.245 =l add %.244, 0 + %.246 =w copy 2269255619 + storew %.246, %.245 + %.247 =l add %.244, 4 + %.248 =w copy 2269255619 + storew %.248, %.247 + %.249 =l add %.244, 8 + %.250 =w copy 1 + storew %.250, %.249 + %.251 =l add %.244, 12 + %.252 =w copy 2269255619 + storew %.252, %.251 + %.253 =l add %.244, 16 + %.254 =w copy 2269255619 + storew %.254, %.253 + %.255 =l add %.244, 20 + %.256 =w copy 1 + storew %.256, %.255 + %.257 =l add %.244, 24 + %.258 =w copy 2269255619 + storew %.258, %.257 + %.259 =l add %.244, 28 + %.260 =w copy 2269255619 + storew %.260, %.259 + %.262 =l add %.261, 0 + %.263 =w copy 0 + storeh %.263, %.262 + %.265 =l add %.264, 0 + storel %.178, %.265 + %.267 =l add %.266, 0 + %.268 =l copy $g_265 + %.269 =l mul 48, 1 + %.270 =l add %.268, %.269 + %.271 =l copy %.270 + storel %.271, %.267 + %.273 =l add %.272, 0 + %.274 =l 
copy $g_1183 + %.275 =l mul 48, 1 + %.276 =l add %.274, %.275 + %.277 =l copy %.276 + storel %.277, %.273 + %.279 =l add %.278, 0 + %.280 =l extsw 6 + %.281 =l mul %.280, 12 + %.282 =l add $g_13, %.281 + %.283 =l extsw 1 + %.284 =l mul %.283, 4 + %.285 =l add %.282, %.284 + %.286 =l extsw 0 + %.287 =l mul %.286, 4 + %.288 =l add %.285, %.287 + storel %.288, %.279 + %.290 =l add %.289, 0 + %.291 =l copy $g_1183 + %.292 =l mul 16, 1 + %.293 =l add %.291, %.292 + %.294 =l copy %.293 + storel %.294, %.290 + %.296 =l add %.295, 0 + %.297 =l extsw 7 + %.298 =l mul %.297, 12 + %.299 =l add $g_13, %.298 + %.300 =l extsw 0 + %.301 =l mul %.300, 4 + %.302 =l add %.299, %.301 + %.303 =l extsw 0 + %.304 =l mul %.303, 4 + %.305 =l add %.302, %.304 + storel %.305, %.296 + %.307 =l add %.306, 0 + storel %.178, %.307 + %.311 =l add %.310, 0 + %.312 =w copy 18446744073709551615 + storew %.312, %.311 + %.314 =l add %.313, 0 + %.315 =l extsw 0 + %.316 =l copy %.315 + storel %.316, %.314 + %.318 =l add %.317, 0 + %.319 =l copy $g_265 + %.320 =l mul 8, 1 + %.321 =l add %.319, %.320 + %.322 =l copy %.321 + storel %.322, %.318 + %.324 =l add %.323, 0 + storel $g_80, %.324 + %.326 =l add %.325, 0 + %.327 =w copy 59 + storeb %.327, %.326 + %.328 =l add %.325, 1 + storeb 0, %.328 + %.329 =l add %.325, 2 + storeh 0, %.329 + %.330 =l add %.325, 4 + storew 0, %.330 + %.331 =l add %.325, 8 + storel 5846713185812282113, %.331 + %.332 =l add %.325, 16 + %.333 =w copy 3470287970 + storew %.333, %.332 + %.334 =l add %.325, 20 + storew 0, %.334 + %.335 =l add %.325, 24 + storel 12566983408779698474, %.335 + %.336 =l add %.325, 32 + %.337 =w copy 848682309 + storew %.337, %.336 + %.338 =l add %.325, 36 + %.339 =w copy 5 + storew %.339, %.338 + %.340 =l add %.325, 40 + %.341 =w copy 462078022 + storew %.341, %.340 + %.342 =l add %.325, 44 + %.343 =l extsw 0 + %.344 =l sub %.343, 7 + %.345 =w copy %.344 + storew %.345, %.342 + %.346 =l add %.325, 48 + %.347 =l extsw 0 + %.348 =l sub %.347, 1 + %.349 =w 
copy %.348 + storew %.349, %.346 + %.350 =l add %.325, 52 + storew 0, %.350 + %.351 =l add %.325, 56 + %.352 =w copy 30 + storeb %.352, %.351 + %.353 =l add %.325, 57 + storeb 0, %.353 + %.354 =l add %.325, 58 + storeh 0, %.354 + %.355 =l add %.325, 60 + storew 0, %.355 + %.356 =l add %.325, 64 + storel 4531615791379082412, %.356 + %.357 =l add %.325, 72 + %.358 =w copy 3542425067 + storew %.358, %.357 + %.359 =l add %.325, 76 + storew 0, %.359 + %.360 =l add %.325, 80 + %.361 =l copy 18446744073709551615 + storel %.361, %.360 + %.362 =l add %.325, 88 + %.363 =w copy 2349175835 + storew %.363, %.362 + %.364 =l add %.325, 92 + %.365 =w copy 1457159742 + storew %.365, %.364 + %.366 =l add %.325, 96 + %.367 =w copy 673000678 + storew %.367, %.366 + %.368 =l add %.325, 100 + %.369 =w copy 2013111086 + storew %.369, %.368 + %.370 =l add %.325, 104 + %.371 =w copy 713487104 + storew %.371, %.370 + %.372 =l add %.325, 108 + storew 0, %.372 + %.373 =l add %.325, 112 + %.374 =w copy 59 + storeb %.374, %.373 + %.375 =l add %.325, 113 + storeb 0, %.375 + %.376 =l add %.325, 114 + storeh 0, %.376 + %.377 =l add %.325, 116 + storew 0, %.377 + %.378 =l add %.325, 120 + storel 5846713185812282113, %.378 + %.379 =l add %.325, 128 + %.380 =w copy 3470287970 + storew %.380, %.379 + %.381 =l add %.325, 132 + storew 0, %.381 + %.382 =l add %.325, 136 + storel 12566983408779698474, %.382 + %.383 =l add %.325, 144 + %.384 =w copy 848682309 + storew %.384, %.383 + %.385 =l add %.325, 148 + %.386 =w copy 5 + storew %.386, %.385 + %.387 =l add %.325, 152 + %.388 =w copy 462078022 + storew %.388, %.387 + %.389 =l add %.325, 156 + %.390 =l extsw 0 + %.391 =l sub %.390, 7 + %.392 =w copy %.391 + storew %.392, %.389 + %.393 =l add %.325, 160 + %.394 =l extsw 0 + %.395 =l sub %.394, 1 + %.396 =w copy %.395 + storew %.396, %.393 + %.397 =l add %.325, 164 + storew 0, %.397 + %.398 =l add %.325, 168 + %.399 =w copy 30 + storeb %.399, %.398 + %.400 =l add %.325, 169 + storeb 0, %.400 + %.401 =l add 
%.325, 170 + storeh 0, %.401 + %.402 =l add %.325, 172 + storew 0, %.402 + %.403 =l add %.325, 176 + storel 4531615791379082412, %.403 + %.404 =l add %.325, 184 + %.405 =w copy 3542425067 + storew %.405, %.404 + %.406 =l add %.325, 188 + storew 0, %.406 + %.407 =l add %.325, 192 + %.408 =l copy 18446744073709551615 + storel %.408, %.407 + %.409 =l add %.325, 200 + %.410 =w copy 2349175835 + storew %.410, %.409 + %.411 =l add %.325, 204 + %.412 =w copy 1457159742 + storew %.412, %.411 + %.413 =l add %.325, 208 + %.414 =w copy 673000678 + storew %.414, %.413 + %.415 =l add %.325, 212 + %.416 =w copy 2013111086 + storew %.416, %.415 + %.417 =l add %.325, 216 + %.418 =w copy 713487104 + storew %.418, %.417 + %.419 =l add %.325, 220 + storew 0, %.419 + %.420 =l add %.325, 224 + %.421 =w copy 59 + storeb %.421, %.420 + %.422 =l add %.325, 225 + storeb 0, %.422 + %.423 =l add %.325, 226 + storeh 0, %.423 + %.424 =l add %.325, 228 + storew 0, %.424 + %.425 =l add %.325, 232 + storel 5846713185812282113, %.425 + %.426 =l add %.325, 240 + %.427 =w copy 3470287970 + storew %.427, %.426 + %.428 =l add %.325, 244 + storew 0, %.428 + %.429 =l add %.325, 248 + storel 12566983408779698474, %.429 + %.430 =l add %.325, 256 + %.431 =w copy 848682309 + storew %.431, %.430 + %.432 =l add %.325, 260 + %.433 =w copy 5 + storew %.433, %.432 + %.434 =l add %.325, 264 + %.435 =w copy 462078022 + storew %.435, %.434 + %.436 =l add %.325, 268 + %.437 =l extsw 0 + %.438 =l sub %.437, 7 + %.439 =w copy %.438 + storew %.439, %.436 + %.440 =l add %.325, 272 + %.441 =l extsw 0 + %.442 =l sub %.441, 1 + %.443 =w copy %.442 + storew %.443, %.440 + %.444 =l add %.325, 276 + storew 0, %.444 + %.445 =l add %.325, 280 + %.446 =w copy 30 + storeb %.446, %.445 + %.447 =l add %.325, 281 + storeb 0, %.447 + %.448 =l add %.325, 282 + storeh 0, %.448 + %.449 =l add %.325, 284 + storew 0, %.449 + %.450 =l add %.325, 288 + storel 4531615791379082412, %.450 + %.451 =l add %.325, 296 + %.452 =w copy 3542425067 + 
storew %.452, %.451 + %.453 =l add %.325, 300 + storew 0, %.453 + %.454 =l add %.325, 304 + %.455 =l copy 18446744073709551615 + storel %.455, %.454 + %.456 =l add %.325, 312 + %.457 =w copy 2349175835 + storew %.457, %.456 + %.458 =l add %.325, 316 + %.459 =w copy 1457159742 + storew %.459, %.458 + %.460 =l add %.325, 320 + %.461 =w copy 673000678 + storew %.461, %.460 + %.462 =l add %.325, 324 + %.463 =w copy 2013111086 + storew %.463, %.462 + %.464 =l add %.325, 328 + %.465 =w copy 713487104 + storew %.465, %.464 + %.466 =l add %.325, 332 + storew 0, %.466 + %.467 =l add %.325, 336 + %.468 =w copy 59 + storeb %.468, %.467 + %.469 =l add %.325, 337 + storeb 0, %.469 + %.470 =l add %.325, 338 + storeh 0, %.470 + %.471 =l add %.325, 340 + storew 0, %.471 + %.472 =l add %.325, 344 + storel 5846713185812282113, %.472 + %.473 =l add %.325, 352 + %.474 =w copy 3470287970 + storew %.474, %.473 + %.475 =l add %.325, 356 + storew 0, %.475 + %.476 =l add %.325, 360 + storel 12566983408779698474, %.476 + %.477 =l add %.325, 368 + %.478 =w copy 848682309 + storew %.478, %.477 + %.479 =l add %.325, 372 + %.480 =w copy 5 + storew %.480, %.479 + %.481 =l add %.325, 376 + %.482 =w copy 462078022 + storew %.482, %.481 + %.483 =l add %.325, 380 + %.484 =l extsw 0 + %.485 =l sub %.484, 7 + %.486 =w copy %.485 + storew %.486, %.483 + %.487 =l add %.325, 384 + %.488 =l extsw 0 + %.489 =l sub %.488, 1 + %.490 =w copy %.489 + storew %.490, %.487 + %.491 =l add %.325, 388 + storew 0, %.491 + %.492 =l add %.325, 392 + %.493 =w copy 30 + storeb %.493, %.492 + %.494 =l add %.325, 393 + storeb 0, %.494 + %.495 =l add %.325, 394 + storeh 0, %.495 + %.496 =l add %.325, 396 + storew 0, %.496 + %.497 =l add %.325, 400 + storel 4531615791379082412, %.497 + %.498 =l add %.325, 408 + %.499 =w copy 3542425067 + storew %.499, %.498 + %.500 =l add %.325, 412 + storew 0, %.500 + %.501 =l add %.325, 416 + %.502 =l copy 18446744073709551615 + storel %.502, %.501 + %.503 =l add %.325, 424 + %.504 =w copy 
2349175835 + storew %.504, %.503 + %.505 =l add %.325, 428 + %.506 =w copy 1457159742 + storew %.506, %.505 + %.507 =l add %.325, 432 + %.508 =w copy 673000678 + storew %.508, %.507 + %.509 =l add %.325, 436 + %.510 =w copy 2013111086 + storew %.510, %.509 + %.511 =l add %.325, 440 + %.512 =w copy 713487104 + storew %.512, %.511 + %.513 =l add %.325, 444 + storew 0, %.513 + %.514 =l add %.325, 448 + %.515 =w copy 59 + storeb %.515, %.514 + %.516 =l add %.325, 449 + storeb 0, %.516 + %.517 =l add %.325, 450 + storeh 0, %.517 + %.518 =l add %.325, 452 + storew 0, %.518 + %.519 =l add %.325, 456 + storel 5846713185812282113, %.519 + %.520 =l add %.325, 464 + %.521 =w copy 3470287970 + storew %.521, %.520 + %.522 =l add %.325, 468 + storew 0, %.522 + %.523 =l add %.325, 472 + storel 12566983408779698474, %.523 + %.524 =l add %.325, 480 + %.525 =w copy 848682309 + storew %.525, %.524 + %.526 =l add %.325, 484 + %.527 =w copy 5 + storew %.527, %.526 + %.528 =l add %.325, 488 + %.529 =w copy 462078022 + storew %.529, %.528 + %.530 =l add %.325, 492 + %.531 =l extsw 0 + %.532 =l sub %.531, 7 + %.533 =w copy %.532 + storew %.533, %.530 + %.534 =l add %.325, 496 + %.535 =l extsw 0 + %.536 =l sub %.535, 1 + %.537 =w copy %.536 + storew %.537, %.534 + %.538 =l add %.325, 500 + storew 0, %.538 + %.539 =l add %.325, 504 + %.540 =w copy 30 + storeb %.540, %.539 + %.541 =l add %.325, 505 + storeb 0, %.541 + %.542 =l add %.325, 506 + storeh 0, %.542 + %.543 =l add %.325, 508 + storew 0, %.543 + %.544 =l add %.325, 512 + storel 4531615791379082412, %.544 + %.545 =l add %.325, 520 + %.546 =w copy 3542425067 + storew %.546, %.545 + %.547 =l add %.325, 524 + storew 0, %.547 + %.548 =l add %.325, 528 + %.549 =l copy 18446744073709551615 + storel %.549, %.548 + %.550 =l add %.325, 536 + %.551 =w copy 2349175835 + storew %.551, %.550 + %.552 =l add %.325, 540 + %.553 =w copy 1457159742 + storew %.553, %.552 + %.554 =l add %.325, 544 + %.555 =w copy 673000678 + storew %.555, %.554 + %.556 
=l add %.325, 548 + %.557 =w copy 2013111086 + storew %.557, %.556 + %.558 =l add %.325, 552 + %.559 =w copy 713487104 + storew %.559, %.558 + %.560 =l add %.325, 556 + storew 0, %.560 + %.561 =l add %.325, 560 + %.562 =w copy 59 + storeb %.562, %.561 + %.563 =l add %.325, 561 + storeb 0, %.563 + %.564 =l add %.325, 562 + storeh 0, %.564 + %.565 =l add %.325, 564 + storew 0, %.565 + %.566 =l add %.325, 568 + storel 5846713185812282113, %.566 + %.567 =l add %.325, 576 + %.568 =w copy 3470287970 + storew %.568, %.567 + %.569 =l add %.325, 580 + storew 0, %.569 + %.570 =l add %.325, 584 + storel 12566983408779698474, %.570 + %.571 =l add %.325, 592 + %.572 =w copy 848682309 + storew %.572, %.571 + %.573 =l add %.325, 596 + %.574 =w copy 5 + storew %.574, %.573 + %.575 =l add %.325, 600 + %.576 =w copy 462078022 + storew %.576, %.575 + %.577 =l add %.325, 604 + %.578 =l extsw 0 + %.579 =l sub %.578, 7 + %.580 =w copy %.579 + storew %.580, %.577 + %.581 =l add %.325, 608 + %.582 =l extsw 0 + %.583 =l sub %.582, 1 + %.584 =w copy %.583 + storew %.584, %.581 + %.585 =l add %.325, 612 + storew 0, %.585 + %.586 =l add %.325, 616 + %.587 =w copy 30 + storeb %.587, %.586 + %.588 =l add %.325, 617 + storeb 0, %.588 + %.589 =l add %.325, 618 + storeh 0, %.589 + %.590 =l add %.325, 620 + storew 0, %.590 + %.591 =l add %.325, 624 + storel 4531615791379082412, %.591 + %.592 =l add %.325, 632 + %.593 =w copy 3542425067 + storew %.593, %.592 + %.594 =l add %.325, 636 + storew 0, %.594 + %.595 =l add %.325, 640 + %.596 =l copy 18446744073709551615 + storel %.596, %.595 + %.597 =l add %.325, 648 + %.598 =w copy 2349175835 + storew %.598, %.597 + %.599 =l add %.325, 652 + %.600 =w copy 1457159742 + storew %.600, %.599 + %.601 =l add %.325, 656 + %.602 =w copy 673000678 + storew %.602, %.601 + %.603 =l add %.325, 660 + %.604 =w copy 2013111086 + storew %.604, %.603 + %.605 =l add %.325, 664 + %.606 =w copy 713487104 + storew %.606, %.605 + %.607 =l add %.325, 668 + storew 0, %.607 + 
%.608 =l add %.325, 672 + %.609 =w copy 59 + storeb %.609, %.608 + %.610 =l add %.325, 673 + storeb 0, %.610 + %.611 =l add %.325, 674 + storeh 0, %.611 + %.612 =l add %.325, 676 + storew 0, %.612 + %.613 =l add %.325, 680 + storel 5846713185812282113, %.613 + %.614 =l add %.325, 688 + %.615 =w copy 3470287970 + storew %.615, %.614 + %.616 =l add %.325, 692 + storew 0, %.616 + %.617 =l add %.325, 696 + storel 12566983408779698474, %.617 + %.618 =l add %.325, 704 + %.619 =w copy 848682309 + storew %.619, %.618 + %.620 =l add %.325, 708 + %.621 =w copy 5 + storew %.621, %.620 + %.622 =l add %.325, 712 + %.623 =w copy 462078022 + storew %.623, %.622 + %.624 =l add %.325, 716 + %.625 =l extsw 0 + %.626 =l sub %.625, 7 + %.627 =w copy %.626 + storew %.627, %.624 + %.628 =l add %.325, 720 + %.629 =l extsw 0 + %.630 =l sub %.629, 1 + %.631 =w copy %.630 + storew %.631, %.628 + %.632 =l add %.325, 724 + storew 0, %.632 + %.633 =l add %.325, 728 + %.634 =w copy 30 + storeb %.634, %.633 + %.635 =l add %.325, 729 + storeb 0, %.635 + %.636 =l add %.325, 730 + storeh 0, %.636 + %.637 =l add %.325, 732 + storew 0, %.637 + %.638 =l add %.325, 736 + storel 4531615791379082412, %.638 + %.639 =l add %.325, 744 + %.640 =w copy 3542425067 + storew %.640, %.639 + %.641 =l add %.325, 748 + storew 0, %.641 + %.642 =l add %.325, 752 + %.643 =l copy 18446744073709551615 + storel %.643, %.642 + %.644 =l add %.325, 760 + %.645 =w copy 2349175835 + storew %.645, %.644 + %.646 =l add %.325, 764 + %.647 =w copy 1457159742 + storew %.647, %.646 + %.648 =l add %.325, 768 + %.649 =w copy 673000678 + storew %.649, %.648 + %.650 =l add %.325, 772 + %.651 =w copy 2013111086 + storew %.651, %.650 + %.652 =l add %.325, 776 + %.653 =w copy 713487104 + storew %.653, %.652 + %.654 =l add %.325, 780 + storew 0, %.654 + %.655 =l add %.325, 784 + %.656 =w copy 59 + storeb %.656, %.655 + %.657 =l add %.325, 785 + storeb 0, %.657 + %.658 =l add %.325, 786 + storeh 0, %.658 + %.659 =l add %.325, 788 + storew 0, 
%.659 + %.660 =l add %.325, 792 + storel 5846713185812282113, %.660 + %.661 =l add %.325, 800 + %.662 =w copy 3470287970 + storew %.662, %.661 + %.663 =l add %.325, 804 + storew 0, %.663 + %.664 =l add %.325, 808 + storel 12566983408779698474, %.664 + %.665 =l add %.325, 816 + %.666 =w copy 848682309 + storew %.666, %.665 + %.667 =l add %.325, 820 + %.668 =w copy 5 + storew %.668, %.667 + %.669 =l add %.325, 824 + %.670 =w copy 462078022 + storew %.670, %.669 + %.671 =l add %.325, 828 + %.672 =l extsw 0 + %.673 =l sub %.672, 7 + %.674 =w copy %.673 + storew %.674, %.671 + %.675 =l add %.325, 832 + %.676 =l extsw 0 + %.677 =l sub %.676, 1 + %.678 =w copy %.677 + storew %.678, %.675 + %.679 =l add %.325, 836 + storew 0, %.679 + %.680 =l add %.325, 840 + %.681 =w copy 30 + storeb %.681, %.680 + %.682 =l add %.325, 841 + storeb 0, %.682 + %.683 =l add %.325, 842 + storeh 0, %.683 + %.684 =l add %.325, 844 + storew 0, %.684 + %.685 =l add %.325, 848 + storel 4531615791379082412, %.685 + %.686 =l add %.325, 856 + %.687 =w copy 3542425067 + storew %.687, %.686 + %.688 =l add %.325, 860 + storew 0, %.688 + %.689 =l add %.325, 864 + %.690 =l copy 18446744073709551615 + storel %.690, %.689 + %.691 =l add %.325, 872 + %.692 =w copy 2349175835 + storew %.692, %.691 + %.693 =l add %.325, 876 + %.694 =w copy 1457159742 + storew %.694, %.693 + %.695 =l add %.325, 880 + %.696 =w copy 673000678 + storew %.696, %.695 + %.697 =l add %.325, 884 + %.698 =w copy 2013111086 + storew %.698, %.697 + %.699 =l add %.325, 888 + %.700 =w copy 713487104 + storew %.700, %.699 + %.701 =l add %.325, 892 + storew 0, %.701 + %.702 =l add %.325, 896 + %.703 =w copy 59 + storeb %.703, %.702 + %.704 =l add %.325, 897 + storeb 0, %.704 + %.705 =l add %.325, 898 + storeh 0, %.705 + %.706 =l add %.325, 900 + storew 0, %.706 + %.707 =l add %.325, 904 + storel 5846713185812282113, %.707 + %.708 =l add %.325, 912 + %.709 =w copy 3470287970 + storew %.709, %.708 + %.710 =l add %.325, 916 + storew 0, %.710 + 
%.711 =l add %.325, 920 + storel 12566983408779698474, %.711 + %.712 =l add %.325, 928 + %.713 =w copy 848682309 + storew %.713, %.712 + %.714 =l add %.325, 932 + %.715 =w copy 5 + storew %.715, %.714 + %.716 =l add %.325, 936 + %.717 =w copy 462078022 + storew %.717, %.716 + %.718 =l add %.325, 940 + %.719 =l extsw 0 + %.720 =l sub %.719, 7 + %.721 =w copy %.720 + storew %.721, %.718 + %.722 =l add %.325, 944 + %.723 =l extsw 0 + %.724 =l sub %.723, 1 + %.725 =w copy %.724 + storew %.725, %.722 + %.726 =l add %.325, 948 + storew 0, %.726 + %.727 =l add %.325, 952 + %.728 =w copy 30 + storeb %.728, %.727 + %.729 =l add %.325, 953 + storeb 0, %.729 + %.730 =l add %.325, 954 + storeh 0, %.730 + %.731 =l add %.325, 956 + storew 0, %.731 + %.732 =l add %.325, 960 + storel 4531615791379082412, %.732 + %.733 =l add %.325, 968 + %.734 =w copy 3542425067 + storew %.734, %.733 + %.735 =l add %.325, 972 + storew 0, %.735 + %.736 =l add %.325, 976 + %.737 =l copy 18446744073709551615 + storel %.737, %.736 + %.738 =l add %.325, 984 + %.739 =w copy 2349175835 + storew %.739, %.738 + %.740 =l add %.325, 988 + %.741 =w copy 1457159742 + storew %.741, %.740 + %.742 =l add %.325, 992 + %.743 =w copy 673000678 + storew %.743, %.742 + %.744 =l add %.325, 996 + %.745 =w copy 2013111086 + storew %.745, %.744 + %.746 =l add %.325, 1000 + %.747 =w copy 713487104 + storew %.747, %.746 + %.748 =l add %.325, 1004 + storew 0, %.748 + %.749 =l add %.325, 1008 + %.750 =w copy 59 + storeb %.750, %.749 + %.751 =l add %.325, 1009 + storeb 0, %.751 + %.752 =l add %.325, 1010 + storeh 0, %.752 + %.753 =l add %.325, 1012 + storew 0, %.753 + %.754 =l add %.325, 1016 + storel 5846713185812282113, %.754 + %.755 =l add %.325, 1024 + %.756 =w copy 3470287970 + storew %.756, %.755 + %.757 =l add %.325, 1028 + storew 0, %.757 + %.758 =l add %.325, 1032 + storel 12566983408779698474, %.758 + %.759 =l add %.325, 1040 + %.760 =w copy 848682309 + storew %.760, %.759 + %.761 =l add %.325, 1044 + %.762 =w copy 5 
+ storew %.762, %.761 + %.763 =l add %.325, 1048 + %.764 =w copy 462078022 + storew %.764, %.763 + %.765 =l add %.325, 1052 + %.766 =l extsw 0 + %.767 =l sub %.766, 7 + %.768 =w copy %.767 + storew %.768, %.765 + %.769 =l add %.325, 1056 + %.770 =l extsw 0 + %.771 =l sub %.770, 1 + %.772 =w copy %.771 + storew %.772, %.769 + %.773 =l add %.325, 1060 + storew 0, %.773 + %.774 =l add %.325, 1064 + %.775 =w copy 30 + storeb %.775, %.774 + %.776 =l add %.325, 1065 + storeb 0, %.776 + %.777 =l add %.325, 1066 + storeh 0, %.777 + %.778 =l add %.325, 1068 + storew 0, %.778 + %.779 =l add %.325, 1072 + storel 4531615791379082412, %.779 + %.780 =l add %.325, 1080 + %.781 =w copy 3542425067 + storew %.781, %.780 + %.782 =l add %.325, 1084 + storew 0, %.782 + %.783 =l add %.325, 1088 + %.784 =l copy 18446744073709551615 + storel %.784, %.783 + %.785 =l add %.325, 1096 + %.786 =w copy 2349175835 + storew %.786, %.785 + %.787 =l add %.325, 1100 + %.788 =w copy 1457159742 + storew %.788, %.787 + %.789 =l add %.325, 1104 + %.790 =w copy 673000678 + storew %.790, %.789 + %.791 =l add %.325, 1108 + %.792 =w copy 2013111086 + storew %.792, %.791 + %.793 =l add %.325, 1112 + %.794 =w copy 713487104 + storew %.794, %.793 + %.795 =l add %.325, 1116 + storew 0, %.795 + %.796 =l add %.325, 1120 + %.797 =w copy 59 + storeb %.797, %.796 + %.798 =l add %.325, 1121 + storeb 0, %.798 + %.799 =l add %.325, 1122 + storeh 0, %.799 + %.800 =l add %.325, 1124 + storew 0, %.800 + %.801 =l add %.325, 1128 + storel 5846713185812282113, %.801 + %.802 =l add %.325, 1136 + %.803 =w copy 3470287970 + storew %.803, %.802 + %.804 =l add %.325, 1140 + storew 0, %.804 + %.805 =l add %.325, 1144 + storel 12566983408779698474, %.805 + %.806 =l add %.325, 1152 + %.807 =w copy 848682309 + storew %.807, %.806 + %.808 =l add %.325, 1156 + %.809 =w copy 5 + storew %.809, %.808 + %.810 =l add %.325, 1160 + %.811 =w copy 462078022 + storew %.811, %.810 + %.812 =l add %.325, 1164 + %.813 =l extsw 0 + %.814 =l sub 
%.813, 7 + %.815 =w copy %.814 + storew %.815, %.812 + %.816 =l add %.325, 1168 + %.817 =l extsw 0 + %.818 =l sub %.817, 1 + %.819 =w copy %.818 + storew %.819, %.816 + %.820 =l add %.325, 1172 + storew 0, %.820 + %.821 =l add %.325, 1176 + %.822 =w copy 30 + storeb %.822, %.821 + %.823 =l add %.325, 1177 + storeb 0, %.823 + %.824 =l add %.325, 1178 + storeh 0, %.824 + %.825 =l add %.325, 1180 + storew 0, %.825 + %.826 =l add %.325, 1184 + storel 4531615791379082412, %.826 + %.827 =l add %.325, 1192 + %.828 =w copy 3542425067 + storew %.828, %.827 + %.829 =l add %.325, 1196 + storew 0, %.829 + %.830 =l add %.325, 1200 + %.831 =l copy 18446744073709551615 + storel %.831, %.830 + %.832 =l add %.325, 1208 + %.833 =w copy 2349175835 + storew %.833, %.832 + %.834 =l add %.325, 1212 + %.835 =w copy 1457159742 + storew %.835, %.834 + %.836 =l add %.325, 1216 + %.837 =w copy 673000678 + storew %.837, %.836 + %.838 =l add %.325, 1220 + %.839 =w copy 2013111086 + storew %.839, %.838 + %.840 =l add %.325, 1224 + %.841 =w copy 713487104 + storew %.841, %.840 + %.842 =l add %.325, 1228 + storew 0, %.842 + %.843 =l add %.325, 1232 + %.844 =w copy 59 + storeb %.844, %.843 + %.845 =l add %.325, 1233 + storeb 0, %.845 + %.846 =l add %.325, 1234 + storeh 0, %.846 + %.847 =l add %.325, 1236 + storew 0, %.847 + %.848 =l add %.325, 1240 + storel 5846713185812282113, %.848 + %.849 =l add %.325, 1248 + %.850 =w copy 3470287970 + storew %.850, %.849 + %.851 =l add %.325, 1252 + storew 0, %.851 + %.852 =l add %.325, 1256 + storel 12566983408779698474, %.852 + %.853 =l add %.325, 1264 + %.854 =w copy 848682309 + storew %.854, %.853 + %.855 =l add %.325, 1268 + %.856 =w copy 5 + storew %.856, %.855 + %.857 =l add %.325, 1272 + %.858 =w copy 462078022 + storew %.858, %.857 + %.859 =l add %.325, 1276 + %.860 =l extsw 0 + %.861 =l sub %.860, 7 + %.862 =w copy %.861 + storew %.862, %.859 + %.863 =l add %.325, 1280 + %.864 =l extsw 0 + %.865 =l sub %.864, 1 + %.866 =w copy %.865 + storew %.866, 
%.863 + %.867 =l add %.325, 1284 + storew 0, %.867 + %.868 =l add %.325, 1288 + %.869 =w copy 30 + storeb %.869, %.868 + %.870 =l add %.325, 1289 + storeb 0, %.870 + %.871 =l add %.325, 1290 + storeh 0, %.871 + %.872 =l add %.325, 1292 + storew 0, %.872 + %.873 =l add %.325, 1296 + storel 4531615791379082412, %.873 + %.874 =l add %.325, 1304 + %.875 =w copy 3542425067 + storew %.875, %.874 + %.876 =l add %.325, 1308 + storew 0, %.876 + %.877 =l add %.325, 1312 + %.878 =l copy 18446744073709551615 + storel %.878, %.877 + %.879 =l add %.325, 1320 + %.880 =w copy 2349175835 + storew %.880, %.879 + %.881 =l add %.325, 1324 + %.882 =w copy 1457159742 + storew %.882, %.881 + %.883 =l add %.325, 1328 + %.884 =w copy 673000678 + storew %.884, %.883 + %.885 =l add %.325, 1332 + %.886 =w copy 2013111086 + storew %.886, %.885 + %.887 =l add %.325, 1336 + %.888 =w copy 713487104 + storew %.888, %.887 + %.889 =l add %.325, 1340 + storew 0, %.889 + %.890 =l add %.325, 1344 + %.891 =w copy 59 + storeb %.891, %.890 + %.892 =l add %.325, 1345 + storeb 0, %.892 + %.893 =l add %.325, 1346 + storeh 0, %.893 + %.894 =l add %.325, 1348 + storew 0, %.894 + %.895 =l add %.325, 1352 + storel 5846713185812282113, %.895 + %.896 =l add %.325, 1360 + %.897 =w copy 3470287970 + storew %.897, %.896 + %.898 =l add %.325, 1364 + storew 0, %.898 + %.899 =l add %.325, 1368 + storel 12566983408779698474, %.899 + %.900 =l add %.325, 1376 + %.901 =w copy 848682309 + storew %.901, %.900 + %.902 =l add %.325, 1380 + %.903 =w copy 5 + storew %.903, %.902 + %.904 =l add %.325, 1384 + %.905 =w copy 462078022 + storew %.905, %.904 + %.906 =l add %.325, 1388 + %.907 =l extsw 0 + %.908 =l sub %.907, 7 + %.909 =w copy %.908 + storew %.909, %.906 + %.910 =l add %.325, 1392 + %.911 =l extsw 0 + %.912 =l sub %.911, 1 + %.913 =w copy %.912 + storew %.913, %.910 + %.914 =l add %.325, 1396 + storew 0, %.914 + %.915 =l add %.325, 1400 + %.916 =w copy 30 + storeb %.916, %.915 + %.917 =l add %.325, 1401 + storeb 0, 
%.917 + %.918 =l add %.325, 1402 + storeh 0, %.918 + %.919 =l add %.325, 1404 + storew 0, %.919 + %.920 =l add %.325, 1408 + storel 4531615791379082412, %.920 + %.921 =l add %.325, 1416 + %.922 =w copy 3542425067 + storew %.922, %.921 + %.923 =l add %.325, 1420 + storew 0, %.923 + %.924 =l add %.325, 1424 + %.925 =l copy 18446744073709551615 + storel %.925, %.924 + %.926 =l add %.325, 1432 + %.927 =w copy 2349175835 + storew %.927, %.926 + %.928 =l add %.325, 1436 + %.929 =w copy 1457159742 + storew %.929, %.928 + %.930 =l add %.325, 1440 + %.931 =w copy 673000678 + storew %.931, %.930 + %.932 =l add %.325, 1444 + %.933 =w copy 2013111086 + storew %.933, %.932 + %.934 =l add %.325, 1448 + %.935 =w copy 713487104 + storew %.935, %.934 + %.936 =l add %.325, 1452 + storew 0, %.936 + %.937 =l add %.325, 1456 + %.938 =w copy 59 + storeb %.938, %.937 + %.939 =l add %.325, 1457 + storeb 0, %.939 + %.940 =l add %.325, 1458 + storeh 0, %.940 + %.941 =l add %.325, 1460 + storew 0, %.941 + %.942 =l add %.325, 1464 + storel 5846713185812282113, %.942 + %.943 =l add %.325, 1472 + %.944 =w copy 3470287970 + storew %.944, %.943 + %.945 =l add %.325, 1476 + storew 0, %.945 + %.946 =l add %.325, 1480 + storel 12566983408779698474, %.946 + %.947 =l add %.325, 1488 + %.948 =w copy 848682309 + storew %.948, %.947 + %.949 =l add %.325, 1492 + %.950 =w copy 5 + storew %.950, %.949 + %.951 =l add %.325, 1496 + %.952 =w copy 462078022 + storew %.952, %.951 + %.953 =l add %.325, 1500 + %.954 =l extsw 0 + %.955 =l sub %.954, 7 + %.956 =w copy %.955 + storew %.956, %.953 + %.957 =l add %.325, 1504 + %.958 =l extsw 0 + %.959 =l sub %.958, 1 + %.960 =w copy %.959 + storew %.960, %.957 + %.961 =l add %.325, 1508 + storew 0, %.961 + %.962 =l add %.325, 1512 + %.963 =w copy 30 + storeb %.963, %.962 + %.964 =l add %.325, 1513 + storeb 0, %.964 + %.965 =l add %.325, 1514 + storeh 0, %.965 + %.966 =l add %.325, 1516 + storew 0, %.966 + %.967 =l add %.325, 1520 + storel 4531615791379082412, %.967 + 
%.968 =l add %.325, 1528 + %.969 =w copy 3542425067 + storew %.969, %.968 + %.970 =l add %.325, 1532 + storew 0, %.970 + %.971 =l add %.325, 1536 + %.972 =l copy 18446744073709551615 + storel %.972, %.971 + %.973 =l add %.325, 1544 + %.974 =w copy 2349175835 + storew %.974, %.973 + %.975 =l add %.325, 1548 + %.976 =w copy 1457159742 + storew %.976, %.975 + %.977 =l add %.325, 1552 + %.978 =w copy 673000678 + storew %.978, %.977 + %.979 =l add %.325, 1556 + %.980 =w copy 2013111086 + storew %.980, %.979 + %.981 =l add %.325, 1560 + %.982 =w copy 713487104 + storew %.982, %.981 + %.983 =l add %.325, 1564 + storew 0, %.983 + %.984 =l add %.325, 1568 + %.985 =w copy 59 + storeb %.985, %.984 + %.986 =l add %.325, 1569 + storeb 0, %.986 + %.987 =l add %.325, 1570 + storeh 0, %.987 + %.988 =l add %.325, 1572 + storew 0, %.988 + %.989 =l add %.325, 1576 + storel 5846713185812282113, %.989 + %.990 =l add %.325, 1584 + %.991 =w copy 3470287970 + storew %.991, %.990 + %.992 =l add %.325, 1588 + storew 0, %.992 + %.993 =l add %.325, 1592 + storel 12566983408779698474, %.993 + %.994 =l add %.325, 1600 + %.995 =w copy 848682309 + storew %.995, %.994 + %.996 =l add %.325, 1604 + %.997 =w copy 5 + storew %.997, %.996 + %.998 =l add %.325, 1608 + %.999 =w copy 462078022 + storew %.999, %.998 + %.1000 =l add %.325, 1612 + %.1001 =l extsw 0 + %.1002 =l sub %.1001, 7 + %.1003 =w copy %.1002 + storew %.1003, %.1000 + %.1004 =l add %.325, 1616 + %.1005 =l extsw 0 + %.1006 =l sub %.1005, 1 + %.1007 =w copy %.1006 + storew %.1007, %.1004 + %.1008 =l add %.325, 1620 + storew 0, %.1008 + %.1009 =l add %.325, 1624 + %.1010 =w copy 30 + storeb %.1010, %.1009 + %.1011 =l add %.325, 1625 + storeb 0, %.1011 + %.1012 =l add %.325, 1626 + storeh 0, %.1012 + %.1013 =l add %.325, 1628 + storew 0, %.1013 + %.1014 =l add %.325, 1632 + storel 4531615791379082412, %.1014 + %.1015 =l add %.325, 1640 + %.1016 =w copy 3542425067 + storew %.1016, %.1015 + %.1017 =l add %.325, 1644 + storew 0, %.1017 + 
%.1018 =l add %.325, 1648 + %.1019 =l copy 18446744073709551615 + storel %.1019, %.1018 + %.1020 =l add %.325, 1656 + %.1021 =w copy 2349175835 + storew %.1021, %.1020 + %.1022 =l add %.325, 1660 + %.1023 =w copy 1457159742 + storew %.1023, %.1022 + %.1024 =l add %.325, 1664 + %.1025 =w copy 673000678 + storew %.1025, %.1024 + %.1026 =l add %.325, 1668 + %.1027 =w copy 2013111086 + storew %.1027, %.1026 + %.1028 =l add %.325, 1672 + %.1029 =w copy 713487104 + storew %.1029, %.1028 + %.1030 =l add %.325, 1676 + storew 0, %.1030 + %.1031 =l add %.325, 1680 + %.1032 =w copy 59 + storeb %.1032, %.1031 + %.1033 =l add %.325, 1681 + storeb 0, %.1033 + %.1034 =l add %.325, 1682 + storeh 0, %.1034 + %.1035 =l add %.325, 1684 + storew 0, %.1035 + %.1036 =l add %.325, 1688 + storel 5846713185812282113, %.1036 + %.1037 =l add %.325, 1696 + %.1038 =w copy 3470287970 + storew %.1038, %.1037 + %.1039 =l add %.325, 1700 + storew 0, %.1039 + %.1040 =l add %.325, 1704 + storel 12566983408779698474, %.1040 + %.1041 =l add %.325, 1712 + %.1042 =w copy 848682309 + storew %.1042, %.1041 + %.1043 =l add %.325, 1716 + %.1044 =w copy 5 + storew %.1044, %.1043 + %.1045 =l add %.325, 1720 + %.1046 =w copy 462078022 + storew %.1046, %.1045 + %.1047 =l add %.325, 1724 + %.1048 =l extsw 0 + %.1049 =l sub %.1048, 7 + %.1050 =w copy %.1049 + storew %.1050, %.1047 + %.1051 =l add %.325, 1728 + %.1052 =l extsw 0 + %.1053 =l sub %.1052, 1 + %.1054 =w copy %.1053 + storew %.1054, %.1051 + %.1055 =l add %.325, 1732 + storew 0, %.1055 + %.1056 =l add %.325, 1736 + %.1057 =w copy 30 + storeb %.1057, %.1056 + %.1058 =l add %.325, 1737 + storeb 0, %.1058 + %.1059 =l add %.325, 1738 + storeh 0, %.1059 + %.1060 =l add %.325, 1740 + storew 0, %.1060 + %.1061 =l add %.325, 1744 + storel 4531615791379082412, %.1061 + %.1062 =l add %.325, 1752 + %.1063 =w copy 3542425067 + storew %.1063, %.1062 + %.1064 =l add %.325, 1756 + storew 0, %.1064 + %.1065 =l add %.325, 1760 + %.1066 =l copy 18446744073709551615 + 
storel %.1066, %.1065 + %.1067 =l add %.325, 1768 + %.1068 =w copy 2349175835 + storew %.1068, %.1067 + %.1069 =l add %.325, 1772 + %.1070 =w copy 1457159742 + storew %.1070, %.1069 + %.1071 =l add %.325, 1776 + %.1072 =w copy 673000678 + storew %.1072, %.1071 + %.1073 =l add %.325, 1780 + %.1074 =w copy 2013111086 + storew %.1074, %.1073 + %.1075 =l add %.325, 1784 + %.1076 =w copy 713487104 + storew %.1076, %.1075 + %.1077 =l add %.325, 1788 + storew 0, %.1077 + %.1078 =l add %.325, 1792 + %.1079 =w copy 59 + storeb %.1079, %.1078 + %.1080 =l add %.325, 1793 + storeb 0, %.1080 + %.1081 =l add %.325, 1794 + storeh 0, %.1081 + %.1082 =l add %.325, 1796 + storew 0, %.1082 + %.1083 =l add %.325, 1800 + storel 5846713185812282113, %.1083 + %.1084 =l add %.325, 1808 + %.1085 =w copy 3470287970 + storew %.1085, %.1084 + %.1086 =l add %.325, 1812 + storew 0, %.1086 + %.1087 =l add %.325, 1816 + storel 12566983408779698474, %.1087 + %.1088 =l add %.325, 1824 + %.1089 =w copy 848682309 + storew %.1089, %.1088 + %.1090 =l add %.325, 1828 + %.1091 =w copy 5 + storew %.1091, %.1090 + %.1092 =l add %.325, 1832 + %.1093 =w copy 462078022 + storew %.1093, %.1092 + %.1094 =l add %.325, 1836 + %.1095 =l extsw 0 + %.1096 =l sub %.1095, 7 + %.1097 =w copy %.1096 + storew %.1097, %.1094 + %.1098 =l add %.325, 1840 + %.1099 =l extsw 0 + %.1100 =l sub %.1099, 1 + %.1101 =w copy %.1100 + storew %.1101, %.1098 + %.1102 =l add %.325, 1844 + storew 0, %.1102 + %.1103 =l add %.325, 1848 + %.1104 =w copy 30 + storeb %.1104, %.1103 + %.1105 =l add %.325, 1849 + storeb 0, %.1105 + %.1106 =l add %.325, 1850 + storeh 0, %.1106 + %.1107 =l add %.325, 1852 + storew 0, %.1107 + %.1108 =l add %.325, 1856 + storel 4531615791379082412, %.1108 + %.1109 =l add %.325, 1864 + %.1110 =w copy 3542425067 + storew %.1110, %.1109 + %.1111 =l add %.325, 1868 + storew 0, %.1111 + %.1112 =l add %.325, 1872 + %.1113 =l copy 18446744073709551615 + storel %.1113, %.1112 + %.1114 =l add %.325, 1880 + %.1115 =w copy 
2349175835 + storew %.1115, %.1114 + %.1116 =l add %.325, 1884 + %.1117 =w copy 1457159742 + storew %.1117, %.1116 + %.1118 =l add %.325, 1888 + %.1119 =w copy 673000678 + storew %.1119, %.1118 + %.1120 =l add %.325, 1892 + %.1121 =w copy 2013111086 + storew %.1121, %.1120 + %.1122 =l add %.325, 1896 + %.1123 =w copy 713487104 + storew %.1123, %.1122 + %.1124 =l add %.325, 1900 + storew 0, %.1124 + %.1125 =l add %.325, 1904 + %.1126 =w copy 59 + storeb %.1126, %.1125 + %.1127 =l add %.325, 1905 + storeb 0, %.1127 + %.1128 =l add %.325, 1906 + storeh 0, %.1128 + %.1129 =l add %.325, 1908 + storew 0, %.1129 + %.1130 =l add %.325, 1912 + storel 5846713185812282113, %.1130 + %.1131 =l add %.325, 1920 + %.1132 =w copy 3470287970 + storew %.1132, %.1131 + %.1133 =l add %.325, 1924 + storew 0, %.1133 + %.1134 =l add %.325, 1928 + storel 12566983408779698474, %.1134 + %.1135 =l add %.325, 1936 + %.1136 =w copy 848682309 + storew %.1136, %.1135 + %.1137 =l add %.325, 1940 + %.1138 =w copy 5 + storew %.1138, %.1137 + %.1139 =l add %.325, 1944 + %.1140 =w copy 462078022 + storew %.1140, %.1139 + %.1141 =l add %.325, 1948 + %.1142 =l extsw 0 + %.1143 =l sub %.1142, 7 + %.1144 =w copy %.1143 + storew %.1144, %.1141 + %.1145 =l add %.325, 1952 + %.1146 =l extsw 0 + %.1147 =l sub %.1146, 1 + %.1148 =w copy %.1147 + storew %.1148, %.1145 + %.1149 =l add %.325, 1956 + storew 0, %.1149 + %.1150 =l add %.325, 1960 + %.1151 =w copy 30 + storeb %.1151, %.1150 + %.1152 =l add %.325, 1961 + storeb 0, %.1152 + %.1153 =l add %.325, 1962 + storeh 0, %.1153 + %.1154 =l add %.325, 1964 + storew 0, %.1154 + %.1155 =l add %.325, 1968 + storel 4531615791379082412, %.1155 + %.1156 =l add %.325, 1976 + %.1157 =w copy 3542425067 + storew %.1157, %.1156 + %.1158 =l add %.325, 1980 + storew 0, %.1158 + %.1159 =l add %.325, 1984 + %.1160 =l copy 18446744073709551615 + storel %.1160, %.1159 + %.1161 =l add %.325, 1992 + %.1162 =w copy 2349175835 + storew %.1162, %.1161 + %.1163 =l add %.325, 1996 + 
%.1164 =w copy 1457159742 + storew %.1164, %.1163 + %.1165 =l add %.325, 2000 + %.1166 =w copy 673000678 + storew %.1166, %.1165 + %.1167 =l add %.325, 2004 + %.1168 =w copy 2013111086 + storew %.1168, %.1167 + %.1169 =l add %.325, 2008 + %.1170 =w copy 713487104 + storew %.1170, %.1169 + %.1171 =l add %.325, 2012 + storew 0, %.1171 + %.1172 =l add %.325, 2016 + %.1173 =w copy 59 + storeb %.1173, %.1172 + %.1174 =l add %.325, 2017 + storeb 0, %.1174 + %.1175 =l add %.325, 2018 + storeh 0, %.1175 + %.1176 =l add %.325, 2020 + storew 0, %.1176 + %.1177 =l add %.325, 2024 + storel 5846713185812282113, %.1177 + %.1178 =l add %.325, 2032 + %.1179 =w copy 3470287970 + storew %.1179, %.1178 + %.1180 =l add %.325, 2036 + storew 0, %.1180 + %.1181 =l add %.325, 2040 + storel 12566983408779698474, %.1181 + %.1182 =l add %.325, 2048 + %.1183 =w copy 848682309 + storew %.1183, %.1182 + %.1184 =l add %.325, 2052 + %.1185 =w copy 5 + storew %.1185, %.1184 + %.1186 =l add %.325, 2056 + %.1187 =w copy 462078022 + storew %.1187, %.1186 + %.1188 =l add %.325, 2060 + %.1189 =l extsw 0 + %.1190 =l sub %.1189, 7 + %.1191 =w copy %.1190 + storew %.1191, %.1188 + %.1192 =l add %.325, 2064 + %.1193 =l extsw 0 + %.1194 =l sub %.1193, 1 + %.1195 =w copy %.1194 + storew %.1195, %.1192 + %.1196 =l add %.325, 2068 + storew 0, %.1196 + %.1197 =l add %.325, 2072 + %.1198 =w copy 30 + storeb %.1198, %.1197 + %.1199 =l add %.325, 2073 + storeb 0, %.1199 + %.1200 =l add %.325, 2074 + storeh 0, %.1200 + %.1201 =l add %.325, 2076 + storew 0, %.1201 + %.1202 =l add %.325, 2080 + storel 4531615791379082412, %.1202 + %.1203 =l add %.325, 2088 + %.1204 =w copy 3542425067 + storew %.1204, %.1203 + %.1205 =l add %.325, 2092 + storew 0, %.1205 + %.1206 =l add %.325, 2096 + %.1207 =l copy 18446744073709551615 + storel %.1207, %.1206 + %.1208 =l add %.325, 2104 + %.1209 =w copy 2349175835 + storew %.1209, %.1208 + %.1210 =l add %.325, 2108 + %.1211 =w copy 1457159742 + storew %.1211, %.1210 + %.1212 =l add 
%.325, 2112 + %.1213 =w copy 673000678 + storew %.1213, %.1212 + %.1214 =l add %.325, 2116 + %.1215 =w copy 2013111086 + storew %.1215, %.1214 + %.1216 =l add %.325, 2120 + %.1217 =w copy 713487104 + storew %.1217, %.1216 + %.1218 =l add %.325, 2124 + storew 0, %.1218 + %.1219 =l add %.325, 2128 + %.1220 =w copy 59 + storeb %.1220, %.1219 + %.1221 =l add %.325, 2129 + storeb 0, %.1221 + %.1222 =l add %.325, 2130 + storeh 0, %.1222 + %.1223 =l add %.325, 2132 + storew 0, %.1223 + %.1224 =l add %.325, 2136 + storel 5846713185812282113, %.1224 + %.1225 =l add %.325, 2144 + %.1226 =w copy 3470287970 + storew %.1226, %.1225 + %.1227 =l add %.325, 2148 + storew 0, %.1227 + %.1228 =l add %.325, 2152 + storel 12566983408779698474, %.1228 + %.1229 =l add %.325, 2160 + %.1230 =w copy 848682309 + storew %.1230, %.1229 + %.1231 =l add %.325, 2164 + %.1232 =w copy 5 + storew %.1232, %.1231 + %.1233 =l add %.325, 2168 + %.1234 =w copy 462078022 + storew %.1234, %.1233 + %.1235 =l add %.325, 2172 + %.1236 =l extsw 0 + %.1237 =l sub %.1236, 7 + %.1238 =w copy %.1237 + storew %.1238, %.1235 + %.1239 =l add %.325, 2176 + %.1240 =l extsw 0 + %.1241 =l sub %.1240, 1 + %.1242 =w copy %.1241 + storew %.1242, %.1239 + %.1243 =l add %.325, 2180 + storew 0, %.1243 + %.1244 =l add %.325, 2184 + %.1245 =w copy 30 + storeb %.1245, %.1244 + %.1246 =l add %.325, 2185 + storeb 0, %.1246 + %.1247 =l add %.325, 2186 + storeh 0, %.1247 + %.1248 =l add %.325, 2188 + storew 0, %.1248 + %.1249 =l add %.325, 2192 + storel 4531615791379082412, %.1249 + %.1250 =l add %.325, 2200 + %.1251 =w copy 3542425067 + storew %.1251, %.1250 + %.1252 =l add %.325, 2204 + storew 0, %.1252 + %.1253 =l add %.325, 2208 + %.1254 =l copy 18446744073709551615 + storel %.1254, %.1253 + %.1255 =l add %.325, 2216 + %.1256 =w copy 2349175835 + storew %.1256, %.1255 + %.1257 =l add %.325, 2220 + %.1258 =w copy 1457159742 + storew %.1258, %.1257 + %.1259 =l add %.325, 2224 + %.1260 =w copy 673000678 + storew %.1260, %.1259 + 
%.1261 =l add %.325, 2228 + %.1262 =w copy 2013111086 + storew %.1262, %.1261 + %.1263 =l add %.325, 2232 + %.1264 =w copy 713487104 + storew %.1264, %.1263 + %.1265 =l add %.325, 2236 + storew 0, %.1265 + %.1266 =l add %.325, 2240 + %.1267 =w copy 59 + storeb %.1267, %.1266 + %.1268 =l add %.325, 2241 + storeb 0, %.1268 + %.1269 =l add %.325, 2242 + storeh 0, %.1269 + %.1270 =l add %.325, 2244 + storew 0, %.1270 + %.1271 =l add %.325, 2248 + storel 5846713185812282113, %.1271 + %.1272 =l add %.325, 2256 + %.1273 =w copy 3470287970 + storew %.1273, %.1272 + %.1274 =l add %.325, 2260 + storew 0, %.1274 + %.1275 =l add %.325, 2264 + storel 12566983408779698474, %.1275 + %.1276 =l add %.325, 2272 + %.1277 =w copy 848682309 + storew %.1277, %.1276 + %.1278 =l add %.325, 2276 + %.1279 =w copy 5 + storew %.1279, %.1278 + %.1280 =l add %.325, 2280 + %.1281 =w copy 462078022 + storew %.1281, %.1280 + %.1282 =l add %.325, 2284 + %.1283 =l extsw 0 + %.1284 =l sub %.1283, 7 + %.1285 =w copy %.1284 + storew %.1285, %.1282 + %.1286 =l add %.325, 2288 + %.1287 =l extsw 0 + %.1288 =l sub %.1287, 1 + %.1289 =w copy %.1288 + storew %.1289, %.1286 + %.1290 =l add %.325, 2292 + storew 0, %.1290 + %.1291 =l add %.325, 2296 + %.1292 =w copy 30 + storeb %.1292, %.1291 + %.1293 =l add %.325, 2297 + storeb 0, %.1293 + %.1294 =l add %.325, 2298 + storeh 0, %.1294 + %.1295 =l add %.325, 2300 + storew 0, %.1295 + %.1296 =l add %.325, 2304 + storel 4531615791379082412, %.1296 + %.1297 =l add %.325, 2312 + %.1298 =w copy 3542425067 + storew %.1298, %.1297 + %.1299 =l add %.325, 2316 + storew 0, %.1299 + %.1300 =l add %.325, 2320 + %.1301 =l copy 18446744073709551615 + storel %.1301, %.1300 + %.1302 =l add %.325, 2328 + %.1303 =w copy 2349175835 + storew %.1303, %.1302 + %.1304 =l add %.325, 2332 + %.1305 =w copy 1457159742 + storew %.1305, %.1304 + %.1306 =l add %.325, 2336 + %.1307 =w copy 673000678 + storew %.1307, %.1306 + %.1308 =l add %.325, 2340 + %.1309 =w copy 2013111086 + storew 
%.1309, %.1308 + %.1310 =l add %.325, 2344 + %.1311 =w copy 713487104 + storew %.1311, %.1310 + %.1312 =l add %.325, 2348 + storew 0, %.1312 + %.1313 =l add %.325, 2352 + %.1314 =w copy 59 + storeb %.1314, %.1313 + %.1315 =l add %.325, 2353 + storeb 0, %.1315 + %.1316 =l add %.325, 2354 + storeh 0, %.1316 + %.1317 =l add %.325, 2356 + storew 0, %.1317 + %.1318 =l add %.325, 2360 + storel 5846713185812282113, %.1318 + %.1319 =l add %.325, 2368 + %.1320 =w copy 3470287970 + storew %.1320, %.1319 + %.1321 =l add %.325, 2372 + storew 0, %.1321 + %.1322 =l add %.325, 2376 + storel 12566983408779698474, %.1322 + %.1323 =l add %.325, 2384 + %.1324 =w copy 848682309 + storew %.1324, %.1323 + %.1325 =l add %.325, 2388 + %.1326 =w copy 5 + storew %.1326, %.1325 + %.1327 =l add %.325, 2392 + %.1328 =w copy 462078022 + storew %.1328, %.1327 + %.1329 =l add %.325, 2396 + %.1330 =l extsw 0 + %.1331 =l sub %.1330, 7 + %.1332 =w copy %.1331 + storew %.1332, %.1329 + %.1333 =l add %.325, 2400 + %.1334 =l extsw 0 + %.1335 =l sub %.1334, 1 + %.1336 =w copy %.1335 + storew %.1336, %.1333 + %.1337 =l add %.325, 2404 + storew 0, %.1337 + %.1338 =l add %.325, 2408 + %.1339 =w copy 30 + storeb %.1339, %.1338 + %.1340 =l add %.325, 2409 + storeb 0, %.1340 + %.1341 =l add %.325, 2410 + storeh 0, %.1341 + %.1342 =l add %.325, 2412 + storew 0, %.1342 + %.1343 =l add %.325, 2416 + storel 4531615791379082412, %.1343 + %.1344 =l add %.325, 2424 + %.1345 =w copy 3542425067 + storew %.1345, %.1344 + %.1346 =l add %.325, 2428 + storew 0, %.1346 + %.1347 =l add %.325, 2432 + %.1348 =l copy 18446744073709551615 + storel %.1348, %.1347 + %.1349 =l add %.325, 2440 + %.1350 =w copy 2349175835 + storew %.1350, %.1349 + %.1351 =l add %.325, 2444 + %.1352 =w copy 1457159742 + storew %.1352, %.1351 + %.1353 =l add %.325, 2448 + %.1354 =w copy 673000678 + storew %.1354, %.1353 + %.1355 =l add %.325, 2452 + %.1356 =w copy 2013111086 + storew %.1356, %.1355 + %.1357 =l add %.325, 2456 + %.1358 =w copy 
713487104 + storew %.1358, %.1357 + %.1359 =l add %.325, 2460 + storew 0, %.1359 + %.1360 =l add %.325, 2464 + %.1361 =w copy 59 + storeb %.1361, %.1360 + %.1362 =l add %.325, 2465 + storeb 0, %.1362 + %.1363 =l add %.325, 2466 + storeh 0, %.1363 + %.1364 =l add %.325, 2468 + storew 0, %.1364 + %.1365 =l add %.325, 2472 + storel 5846713185812282113, %.1365 + %.1366 =l add %.325, 2480 + %.1367 =w copy 3470287970 + storew %.1367, %.1366 + %.1368 =l add %.325, 2484 + storew 0, %.1368 + %.1369 =l add %.325, 2488 + storel 12566983408779698474, %.1369 + %.1370 =l add %.325, 2496 + %.1371 =w copy 848682309 + storew %.1371, %.1370 + %.1372 =l add %.325, 2500 + %.1373 =w copy 5 + storew %.1373, %.1372 + %.1374 =l add %.325, 2504 + %.1375 =w copy 462078022 + storew %.1375, %.1374 + %.1376 =l add %.325, 2508 + %.1377 =l extsw 0 + %.1378 =l sub %.1377, 7 + %.1379 =w copy %.1378 + storew %.1379, %.1376 + %.1380 =l add %.325, 2512 + %.1381 =l extsw 0 + %.1382 =l sub %.1381, 1 + %.1383 =w copy %.1382 + storew %.1383, %.1380 + %.1384 =l add %.325, 2516 + storew 0, %.1384 + %.1385 =l add %.325, 2520 + %.1386 =w copy 30 + storeb %.1386, %.1385 + %.1387 =l add %.325, 2521 + storeb 0, %.1387 + %.1388 =l add %.325, 2522 + storeh 0, %.1388 + %.1389 =l add %.325, 2524 + storew 0, %.1389 + %.1390 =l add %.325, 2528 + storel 4531615791379082412, %.1390 + %.1391 =l add %.325, 2536 + %.1392 =w copy 3542425067 + storew %.1392, %.1391 + %.1393 =l add %.325, 2540 + storew 0, %.1393 + %.1394 =l add %.325, 2544 + %.1395 =l copy 18446744073709551615 + storel %.1395, %.1394 + %.1396 =l add %.325, 2552 + %.1397 =w copy 2349175835 + storew %.1397, %.1396 + %.1398 =l add %.325, 2556 + %.1399 =w copy 1457159742 + storew %.1399, %.1398 + %.1400 =l add %.325, 2560 + %.1401 =w copy 673000678 + storew %.1401, %.1400 + %.1402 =l add %.325, 2564 + %.1403 =w copy 2013111086 + storew %.1403, %.1402 + %.1404 =l add %.325, 2568 + %.1405 =w copy 713487104 + storew %.1405, %.1404 + %.1406 =l add %.325, 2572 + 
storew 0, %.1406 + %.1407 =l add %.325, 2576 + %.1408 =w copy 59 + storeb %.1408, %.1407 + %.1409 =l add %.325, 2577 + storeb 0, %.1409 + %.1410 =l add %.325, 2578 + storeh 0, %.1410 + %.1411 =l add %.325, 2580 + storew 0, %.1411 + %.1412 =l add %.325, 2584 + storel 5846713185812282113, %.1412 + %.1413 =l add %.325, 2592 + %.1414 =w copy 3470287970 + storew %.1414, %.1413 + %.1415 =l add %.325, 2596 + storew 0, %.1415 + %.1416 =l add %.325, 2600 + storel 12566983408779698474, %.1416 + %.1417 =l add %.325, 2608 + %.1418 =w copy 848682309 + storew %.1418, %.1417 + %.1419 =l add %.325, 2612 + %.1420 =w copy 5 + storew %.1420, %.1419 + %.1421 =l add %.325, 2616 + %.1422 =w copy 462078022 + storew %.1422, %.1421 + %.1423 =l add %.325, 2620 + %.1424 =l extsw 0 + %.1425 =l sub %.1424, 7 + %.1426 =w copy %.1425 + storew %.1426, %.1423 + %.1427 =l add %.325, 2624 + %.1428 =l extsw 0 + %.1429 =l sub %.1428, 1 + %.1430 =w copy %.1429 + storew %.1430, %.1427 + %.1431 =l add %.325, 2628 + storew 0, %.1431 + %.1432 =l add %.325, 2632 + %.1433 =w copy 30 + storeb %.1433, %.1432 + %.1434 =l add %.325, 2633 + storeb 0, %.1434 + %.1435 =l add %.325, 2634 + storeh 0, %.1435 + %.1436 =l add %.325, 2636 + storew 0, %.1436 + %.1437 =l add %.325, 2640 + storel 4531615791379082412, %.1437 + %.1438 =l add %.325, 2648 + %.1439 =w copy 3542425067 + storew %.1439, %.1438 + %.1440 =l add %.325, 2652 + storew 0, %.1440 + %.1441 =l add %.325, 2656 + %.1442 =l copy 18446744073709551615 + storel %.1442, %.1441 + %.1443 =l add %.325, 2664 + %.1444 =w copy 2349175835 + storew %.1444, %.1443 + %.1445 =l add %.325, 2668 + %.1446 =w copy 1457159742 + storew %.1446, %.1445 + %.1447 =l add %.325, 2672 + %.1448 =w copy 673000678 + storew %.1448, %.1447 + %.1449 =l add %.325, 2676 + %.1450 =w copy 2013111086 + storew %.1450, %.1449 + %.1451 =l add %.325, 2680 + %.1452 =w copy 713487104 + storew %.1452, %.1451 + %.1453 =l add %.325, 2684 + storew 0, %.1453 + %.1454 =l add %.325, 2688 + %.1455 =w copy 59 + 
storeb %.1455, %.1454 + %.1456 =l add %.325, 2689 + storeb 0, %.1456 + %.1457 =l add %.325, 2690 + storeh 0, %.1457 + %.1458 =l add %.325, 2692 + storew 0, %.1458 + %.1459 =l add %.325, 2696 + storel 5846713185812282113, %.1459 + %.1460 =l add %.325, 2704 + %.1461 =w copy 3470287970 + storew %.1461, %.1460 + %.1462 =l add %.325, 2708 + storew 0, %.1462 + %.1463 =l add %.325, 2712 + storel 12566983408779698474, %.1463 + %.1464 =l add %.325, 2720 + %.1465 =w copy 848682309 + storew %.1465, %.1464 + %.1466 =l add %.325, 2724 + %.1467 =w copy 5 + storew %.1467, %.1466 + %.1468 =l add %.325, 2728 + %.1469 =w copy 462078022 + storew %.1469, %.1468 + %.1470 =l add %.325, 2732 + %.1471 =l extsw 0 + %.1472 =l sub %.1471, 7 + %.1473 =w copy %.1472 + storew %.1473, %.1470 + %.1474 =l add %.325, 2736 + %.1475 =l extsw 0 + %.1476 =l sub %.1475, 1 + %.1477 =w copy %.1476 + storew %.1477, %.1474 + %.1478 =l add %.325, 2740 + storew 0, %.1478 + %.1479 =l add %.325, 2744 + %.1480 =w copy 30 + storeb %.1480, %.1479 + %.1481 =l add %.325, 2745 + storeb 0, %.1481 + %.1482 =l add %.325, 2746 + storeh 0, %.1482 + %.1483 =l add %.325, 2748 + storew 0, %.1483 + %.1484 =l add %.325, 2752 + storel 4531615791379082412, %.1484 + %.1485 =l add %.325, 2760 + %.1486 =w copy 3542425067 + storew %.1486, %.1485 + %.1487 =l add %.325, 2764 + storew 0, %.1487 + %.1488 =l add %.325, 2768 + %.1489 =l copy 18446744073709551615 + storel %.1489, %.1488 + %.1490 =l add %.325, 2776 + %.1491 =w copy 2349175835 + storew %.1491, %.1490 + %.1492 =l add %.325, 2780 + %.1493 =w copy 1457159742 + storew %.1493, %.1492 + %.1494 =l add %.325, 2784 + %.1495 =w copy 673000678 + storew %.1495, %.1494 + %.1496 =l add %.325, 2788 + %.1497 =w copy 2013111086 + storew %.1497, %.1496 + %.1498 =l add %.325, 2792 + %.1499 =w copy 713487104 + storew %.1499, %.1498 + %.1500 =l add %.325, 2796 + storew 0, %.1500 + %.1501 =l add %.325, 2800 + %.1502 =w copy 59 + storeb %.1502, %.1501 + %.1503 =l add %.325, 2801 + storeb 0, %.1503 
+ %.1504 =l add %.325, 2802 + storeh 0, %.1504 + %.1505 =l add %.325, 2804 + storew 0, %.1505 + %.1506 =l add %.325, 2808 + storel 5846713185812282113, %.1506 + %.1507 =l add %.325, 2816 + %.1508 =w copy 3470287970 + storew %.1508, %.1507 + %.1509 =l add %.325, 2820 + storew 0, %.1509 + %.1510 =l add %.325, 2824 + storel 12566983408779698474, %.1510 + %.1511 =l add %.325, 2832 + %.1512 =w copy 848682309 + storew %.1512, %.1511 + %.1513 =l add %.325, 2836 + %.1514 =w copy 5 + storew %.1514, %.1513 + %.1515 =l add %.325, 2840 + %.1516 =w copy 462078022 + storew %.1516, %.1515 + %.1517 =l add %.325, 2844 + %.1518 =l extsw 0 + %.1519 =l sub %.1518, 7 + %.1520 =w copy %.1519 + storew %.1520, %.1517 + %.1521 =l add %.325, 2848 + %.1522 =l extsw 0 + %.1523 =l sub %.1522, 1 + %.1524 =w copy %.1523 + storew %.1524, %.1521 + %.1525 =l add %.325, 2852 + storew 0, %.1525 + %.1526 =l add %.325, 2856 + %.1527 =w copy 30 + storeb %.1527, %.1526 + %.1528 =l add %.325, 2857 + storeb 0, %.1528 + %.1529 =l add %.325, 2858 + storeh 0, %.1529 + %.1530 =l add %.325, 2860 + storew 0, %.1530 + %.1531 =l add %.325, 2864 + storel 4531615791379082412, %.1531 + %.1532 =l add %.325, 2872 + %.1533 =w copy 3542425067 + storew %.1533, %.1532 + %.1534 =l add %.325, 2876 + storew 0, %.1534 + %.1535 =l add %.325, 2880 + %.1536 =l copy 18446744073709551615 + storel %.1536, %.1535 + %.1537 =l add %.325, 2888 + %.1538 =w copy 2349175835 + storew %.1538, %.1537 + %.1539 =l add %.325, 2892 + %.1540 =w copy 1457159742 + storew %.1540, %.1539 + %.1541 =l add %.325, 2896 + %.1542 =w copy 673000678 + storew %.1542, %.1541 + %.1543 =l add %.325, 2900 + %.1544 =w copy 2013111086 + storew %.1544, %.1543 + %.1545 =l add %.325, 2904 + %.1546 =w copy 713487104 + storew %.1546, %.1545 + %.1547 =l add %.325, 2908 + storew 0, %.1547 + %.1548 =l add %.325, 2912 + %.1549 =w copy 59 + storeb %.1549, %.1548 + %.1550 =l add %.325, 2913 + storeb 0, %.1550 + %.1551 =l add %.325, 2914 + storeh 0, %.1551 + %.1552 =l add 
%.325, 2916 + storew 0, %.1552 + %.1553 =l add %.325, 2920 + storel 5846713185812282113, %.1553 + %.1554 =l add %.325, 2928 + %.1555 =w copy 3470287970 + storew %.1555, %.1554 + %.1556 =l add %.325, 2932 + storew 0, %.1556 + %.1557 =l add %.325, 2936 + storel 12566983408779698474, %.1557 + %.1558 =l add %.325, 2944 + %.1559 =w copy 848682309 + storew %.1559, %.1558 + %.1560 =l add %.325, 2948 + %.1561 =w copy 5 + storew %.1561, %.1560 + %.1562 =l add %.325, 2952 + %.1563 =w copy 462078022 + storew %.1563, %.1562 + %.1564 =l add %.325, 2956 + %.1565 =l extsw 0 + %.1566 =l sub %.1565, 7 + %.1567 =w copy %.1566 + storew %.1567, %.1564 + %.1568 =l add %.325, 2960 + %.1569 =l extsw 0 + %.1570 =l sub %.1569, 1 + %.1571 =w copy %.1570 + storew %.1571, %.1568 + %.1572 =l add %.325, 2964 + storew 0, %.1572 + %.1573 =l add %.325, 2968 + %.1574 =w copy 30 + storeb %.1574, %.1573 + %.1575 =l add %.325, 2969 + storeb 0, %.1575 + %.1576 =l add %.325, 2970 + storeh 0, %.1576 + %.1577 =l add %.325, 2972 + storew 0, %.1577 + %.1578 =l add %.325, 2976 + storel 4531615791379082412, %.1578 + %.1579 =l add %.325, 2984 + %.1580 =w copy 3542425067 + storew %.1580, %.1579 + %.1581 =l add %.325, 2988 + storew 0, %.1581 + %.1582 =l add %.325, 2992 + %.1583 =l copy 18446744073709551615 + storel %.1583, %.1582 + %.1584 =l add %.325, 3000 + %.1585 =w copy 2349175835 + storew %.1585, %.1584 + %.1586 =l add %.325, 3004 + %.1587 =w copy 1457159742 + storew %.1587, %.1586 + %.1588 =l add %.325, 3008 + %.1589 =w copy 673000678 + storew %.1589, %.1588 + %.1590 =l add %.325, 3012 + %.1591 =w copy 2013111086 + storew %.1591, %.1590 + %.1592 =l add %.325, 3016 + %.1593 =w copy 713487104 + storew %.1593, %.1592 + %.1594 =l add %.325, 3020 + storew 0, %.1594 + %.1595 =l add %.325, 3024 + %.1596 =w copy 59 + storeb %.1596, %.1595 + %.1597 =l add %.325, 3025 + storeb 0, %.1597 + %.1598 =l add %.325, 3026 + storeh 0, %.1598 + %.1599 =l add %.325, 3028 + storew 0, %.1599 + %.1600 =l add %.325, 3032 + storel 
5846713185812282113, %.1600 + %.1601 =l add %.325, 3040 + %.1602 =w copy 3470287970 + storew %.1602, %.1601 + %.1603 =l add %.325, 3044 + storew 0, %.1603 + %.1604 =l add %.325, 3048 + storel 12566983408779698474, %.1604 + %.1605 =l add %.325, 3056 + %.1606 =w copy 848682309 + storew %.1606, %.1605 + %.1607 =l add %.325, 3060 + %.1608 =w copy 5 + storew %.1608, %.1607 + %.1609 =l add %.325, 3064 + %.1610 =w copy 462078022 + storew %.1610, %.1609 + %.1611 =l add %.325, 3068 + %.1612 =l extsw 0 + %.1613 =l sub %.1612, 7 + %.1614 =w copy %.1613 + storew %.1614, %.1611 + %.1615 =l add %.325, 3072 + %.1616 =l extsw 0 + %.1617 =l sub %.1616, 1 + %.1618 =w copy %.1617 + storew %.1618, %.1615 + %.1619 =l add %.325, 3076 + storew 0, %.1619 + %.1620 =l add %.325, 3080 + %.1621 =w copy 30 + storeb %.1621, %.1620 + %.1622 =l add %.325, 3081 + storeb 0, %.1622 + %.1623 =l add %.325, 3082 + storeh 0, %.1623 + %.1624 =l add %.325, 3084 + storew 0, %.1624 + %.1625 =l add %.325, 3088 + storel 4531615791379082412, %.1625 + %.1626 =l add %.325, 3096 + %.1627 =w copy 3542425067 + storew %.1627, %.1626 + %.1628 =l add %.325, 3100 + storew 0, %.1628 + %.1629 =l add %.325, 3104 + %.1630 =l copy 18446744073709551615 + storel %.1630, %.1629 + %.1631 =l add %.325, 3112 + %.1632 =w copy 2349175835 + storew %.1632, %.1631 + %.1633 =l add %.325, 3116 + %.1634 =w copy 1457159742 + storew %.1634, %.1633 + %.1635 =l add %.325, 3120 + %.1636 =w copy 673000678 + storew %.1636, %.1635 + %.1637 =l add %.325, 3124 + %.1638 =w copy 2013111086 + storew %.1638, %.1637 + %.1639 =l add %.325, 3128 + %.1640 =w copy 713487104 + storew %.1640, %.1639 + %.1641 =l add %.325, 3132 + storew 0, %.1641 + %.1643 =l add %.1642, 0 + %.1644 =w copy 0 + storeh %.1644, %.1643 + %.1646 =l add %.1645, 0 + %.1647 =w copy 8649 + storeh %.1647, %.1646 + %.1649 =l add %.1648, 0 + %.1650 =l copy 6084821566261148539 + storel %.1650, %.1649 + %.1651 =l add %.1648, 8 + storel 16245754612124257930, %.1651 + %.1652 =l add %.1648, 16 
+ %.1653 =l copy 4052120349730717228 + storel %.1653, %.1652 + %.1654 =l add %.1648, 24 + %.1655 =l copy 873105079974555151 + storel %.1655, %.1654 + %.1656 =l add %.1648, 32 + %.1657 =l copy 18446744073709551615 + storel %.1657, %.1656 + %.1658 =l add %.1648, 40 + %.1659 =l copy 18446744073709551606 + storel %.1659, %.1658 + %.1660 =l add %.1648, 48 + %.1661 =l copy 2875883040891070095 + storel %.1661, %.1660 + %.1662 =l add %.1648, 56 + %.1663 =l copy 1 + storel %.1663, %.1662 + %.1664 =l add %.1648, 64 + %.1665 =l copy 8230877399174301244 + storel %.1665, %.1664 + %.1666 =l add %.1648, 72 + %.1667 =l copy 0 + storel %.1667, %.1666 + %.1668 =l add %.1648, 80 + %.1669 =l copy 2248553449639285191 + storel %.1669, %.1668 + %.1670 =l add %.1648, 88 + storel 16175365243520763722, %.1670 + %.1671 =l add %.1648, 96 + %.1672 =l copy 18446744073709551615 + storel %.1672, %.1671 + %.1673 =l add %.1648, 104 + storel 16245754612124257930, %.1673 + %.1674 =l add %.1648, 112 + %.1675 =l copy 18446744073709551615 + storel %.1675, %.1674 + %.1676 =l add %.1648, 120 + %.1677 =l copy 8230877399174301244 + storel %.1677, %.1676 + %.1678 =l add %.1648, 128 + %.1679 =l copy 8230877399174301244 + storel %.1679, %.1678 + %.1680 =l add %.1648, 136 + %.1681 =l copy 4 + storel %.1681, %.1680 + %.1682 =l add %.1648, 144 + %.1683 =l copy 1 + storel %.1683, %.1682 + %.1684 =l add %.1648, 152 + %.1685 =l copy 7 + storel %.1685, %.1684 + %.1686 =l add %.1648, 160 + %.1687 =l copy 18446744073709551615 + storel %.1687, %.1686 + %.1688 =l add %.1648, 168 + storel 14224845232216782397, %.1688 + %.1689 =l add %.1648, 176 + %.1690 =l copy 6007172698835695880 + storel %.1690, %.1689 + %.1691 =l add %.1648, 184 + %.1692 =l copy 18446744073709551612 + storel %.1692, %.1691 + %.1693 =l add %.1648, 192 + %.1694 =l copy 0 + storel %.1694, %.1693 + %.1695 =l add %.1648, 200 + %.1696 =l copy 18446744073709551607 + storel %.1696, %.1695 + %.1697 =l add %.1648, 208 + storel 18269964541825259806, %.1697 + 
%.1698 =l add %.1648, 216 + %.1699 =l copy 0 + storel %.1699, %.1698 + %.1700 =l add %.1648, 224 + storel 18269964541825259806, %.1700 + %.1701 =l add %.1648, 232 + %.1702 =l copy 8230877399174301244 + storel %.1702, %.1701 + %.1703 =l add %.1648, 240 + %.1704 =l copy 0 + storel %.1704, %.1703 + %.1705 =l add %.1648, 248 + %.1706 =l copy 18446744073709551610 + storel %.1706, %.1705 + %.1707 =l add %.1648, 256 + storel 12201917979609006375, %.1707 + %.1708 =l add %.1648, 264 + storel 12800017575156089034, %.1708 + %.1709 =l add %.1648, 272 + storel 14583114485114116895, %.1709 + %.1710 =l add %.1648, 280 + %.1711 =l copy 18446744073709551615 + storel %.1711, %.1710 + %.1712 =l add %.1648, 288 + %.1713 =l copy 18446744073709551607 + storel %.1713, %.1712 + %.1714 =l add %.1648, 296 + %.1715 =l copy 0 + storel %.1715, %.1714 + %.1716 =l add %.1648, 304 + %.1717 =l copy 5937592181530390446 + storel %.1717, %.1716 + %.1718 =l add %.1648, 312 + %.1719 =l copy 7 + storel %.1719, %.1718 + %.1720 =l add %.1648, 320 + %.1721 =l copy 4052120349730717228 + storel %.1721, %.1720 + %.1722 =l add %.1648, 328 + %.1723 =l copy 18446744073709551611 + storel %.1723, %.1722 + %.1724 =l add %.1648, 336 + %.1725 =l copy 3705651564574322605 + storel %.1725, %.1724 + %.1726 =l add %.1648, 344 + storel 16245754612124257930, %.1726 + %.1727 =l add %.1648, 352 + storel 12201917979609006375, %.1727 + %.1728 =l add %.1648, 360 + %.1729 =l copy 18446744073709551615 + storel %.1729, %.1728 + %.1730 =l add %.1648, 368 + %.1731 =l copy 18446744073709551615 + storel %.1731, %.1730 + %.1732 =l add %.1648, 376 + %.1733 =l copy 5937592181530390446 + storel %.1733, %.1732 + %.1734 =l add %.1648, 384 + %.1735 =l copy 5937592181530390446 + storel %.1735, %.1734 + %.1736 =l add %.1648, 392 + %.1737 =l copy 18446744073709551615 + storel %.1737, %.1736 + %.1738 =l add %.1648, 400 + %.1739 =l copy 0 + storel %.1739, %.1738 + %.1740 =l add %.1648, 408 + storel 15873037008906187302, %.1740 + %.1741 =l add 
%.1648, 416 + %.1742 =l copy 8133712095574703050 + storel %.1742, %.1741 + %.1743 =l add %.1648, 424 + storel 12800017575156089034, %.1743 + %.1744 =l add %.1648, 432 + %.1745 =l copy 1 + storel %.1745, %.1744 + %.1746 =l add %.1648, 440 + %.1747 =l copy 0 + storel %.1747, %.1746 + %.1748 =l add %.1648, 448 + %.1749 =l copy 8230877399174301244 + storel %.1749, %.1748 + %.1750 =l add %.1648, 456 + %.1751 =l copy 18446744073709551607 + storel %.1751, %.1750 + %.1752 =l add %.1648, 464 + storel 18269964541825259806, %.1752 + %.1753 =l add %.1648, 472 + %.1754 =l copy 0 + storel %.1754, %.1753 + %.1755 =l add %.1648, 480 + %.1756 =l copy 3 + storel %.1756, %.1755 + %.1757 =l add %.1648, 488 + storel 10372949673387309524, %.1757 + %.1758 =l add %.1648, 496 + %.1759 =l copy 6084821566261148539 + storel %.1759, %.1758 + %.1760 =l add %.1648, 504 + %.1761 =l copy 18446744073709551607 + storel %.1761, %.1760 + %.1762 =l add %.1648, 512 + %.1763 =l copy 6007172698835695880 + storel %.1763, %.1762 + %.1764 =l add %.1648, 520 + %.1765 =l copy 0 + storel %.1765, %.1764 + %.1766 =l add %.1648, 528 + %.1767 =l copy 0 + storel %.1767, %.1766 + %.1768 =l add %.1648, 536 + %.1769 =l copy 0 + storel %.1769, %.1768 + %.1770 =l add %.1648, 544 + %.1771 =l copy 2875883040891070095 + storel %.1771, %.1770 + %.1772 =l add %.1648, 552 + %.1773 =l copy 4 + storel %.1773, %.1772 + %.1774 =l add %.1648, 560 + %.1775 =l copy 0 + storel %.1775, %.1774 + %.1776 =l add %.1648, 568 + %.1777 =l copy 18446744073709551615 + storel %.1777, %.1776 + %.1778 =l add %.1648, 576 + %.1779 =l copy 7 + storel %.1779, %.1778 + %.1780 =l add %.1648, 584 + storel 15873037008906187302, %.1780 + %.1781 =l add %.1648, 592 + %.1782 =l copy 18446744073709551615 + storel %.1782, %.1781 + %.1783 =l add %.1648, 600 + %.1784 =l copy 18446744073709551615 + storel %.1784, %.1783 + %.1785 =l add %.1648, 608 + storel 18269964541825259806, %.1785 + %.1786 =l add %.1648, 616 + %.1787 =l copy 7 + storel %.1787, %.1786 + %.1788 
=l add %.1648, 624 + %.1789 =l copy 18446744073709551607 + storel %.1789, %.1788 + %.1790 =l add %.1648, 632 + %.1791 =l copy 1 + storel %.1791, %.1790 + %.1792 =l add %.1648, 640 + %.1793 =l copy 4052120349730717228 + storel %.1793, %.1792 + %.1794 =l add %.1648, 648 + %.1795 =l copy 2 + storel %.1795, %.1794 + %.1796 =l add %.1648, 656 + %.1797 =l copy 7 + storel %.1797, %.1796 + %.1798 =l add %.1648, 664 + %.1799 =l copy 2 + storel %.1799, %.1798 + %.1800 =l add %.1648, 672 + %.1801 =l copy 4052120349730717228 + storel %.1801, %.1800 + %.1802 =l add %.1648, 680 + %.1803 =l copy 18446744073709551615 + storel %.1803, %.1802 + %.1804 =l add %.1648, 688 + %.1805 =l copy 18446744073709551606 + storel %.1805, %.1804 + %.1806 =l add %.1648, 696 + %.1807 =l copy 0 + storel %.1807, %.1806 + %.1808 =l add %.1648, 704 + %.1809 =l copy 8230877399174301244 + storel %.1809, %.1808 + %.1810 =l add %.1648, 712 + %.1811 =l copy 18446744073709551607 + storel %.1811, %.1810 + %.1812 =l add %.1648, 720 + %.1813 =l copy 0 + storel %.1813, %.1812 + %.1814 =l add %.1648, 728 + storel 14224845232216782397, %.1814 + %.1815 =l add %.1648, 736 + %.1816 =l copy 6084821566261148539 + storel %.1816, %.1815 + %.1817 =l add %.1648, 744 + storel 14991488133450330097, %.1817 + %.1818 =l add %.1648, 752 + %.1819 =l copy 3705651564574322605 + storel %.1819, %.1818 + %.1820 =l add %.1648, 760 + %.1821 =l copy 1 + storel %.1821, %.1820 + %.1822 =l add %.1648, 768 + %.1823 =l copy 0 + storel %.1823, %.1822 + %.1824 =l add %.1648, 776 + %.1825 =l copy 18446744073709551607 + storel %.1825, %.1824 + %.1826 =l add %.1648, 784 + %.1827 =l copy 18446744073709551606 + storel %.1827, %.1826 + %.1828 =l add %.1648, 792 + %.1829 =l copy 18446744073709551607 + storel %.1829, %.1828 + %.1830 =l add %.1648, 800 + %.1831 =l copy 2248553449639285191 + storel %.1831, %.1830 + %.1832 =l add %.1648, 808 + storel 14991488133450330097, %.1832 + %.1833 =l add %.1648, 816 + %.1834 =l copy 8133712095574703050 + storel 
%.1834, %.1833 + %.1835 =l add %.1648, 824 + %.1836 =l copy 0 + storel %.1836, %.1835 + %.1837 =l add %.1648, 832 + %.1838 =l copy 4052120349730717228 + storel %.1838, %.1837 + %.1839 =l add %.1648, 840 + %.1840 =l copy 18446744073709551607 + storel %.1840, %.1839 + %.1841 =l add %.1648, 848 + %.1842 =l copy 0 + storel %.1842, %.1841 + %.1843 =l add %.1648, 856 + %.1844 =l copy 5937592181530390446 + storel %.1844, %.1843 + %.1845 =l add %.1648, 864 + %.1846 =l copy 7 + storel %.1846, %.1845 + %.1847 =l add %.1648, 872 + %.1848 =l copy 1 + storel %.1848, %.1847 + %.1849 =l add %.1648, 880 + %.1850 =l copy 7 + storel %.1850, %.1849 + %.1851 =l add %.1648, 888 + %.1852 =l copy 873105079974555151 + storel %.1852, %.1851 + %.1853 =l add %.1648, 896 + %.1854 =l copy 3705651564574322605 + storel %.1854, %.1853 + %.1855 =l add %.1648, 904 + storel 10372949673387309524, %.1855 + %.1856 =l add %.1648, 912 + %.1857 =l copy 18446744073709551615 + storel %.1857, %.1856 + %.1858 =l add %.1648, 920 + storel 14234092197388013524, %.1858 + %.1859 =l add %.1648, 928 + %.1860 =l copy 0 + storel %.1860, %.1859 + %.1861 =l add %.1648, 936 + %.1862 =l copy 0 + storel %.1862, %.1861 + %.1863 =l add %.1648, 944 + storel 14234092197388013524, %.1863 + %.1864 =l add %.1648, 952 + %.1865 =l copy 4 + storel %.1865, %.1864 + %.1866 =l add %.1648, 960 + %.1867 =l copy 1 + storel %.1867, %.1866 + %.1868 =l add %.1648, 968 + storel 14991488133450330097, %.1868 + %.1869 =l add %.1648, 976 + storel 12201917979609006375, %.1869 + %.1870 =l add %.1648, 984 + storel 16175365243520763722, %.1870 + %.1871 =l add %.1648, 992 + %.1872 =l copy 6007172698835695880 + storel %.1872, %.1871 + %.1873 =l add %.1648, 1000 + %.1874 =l copy 5937592181530390446 + storel %.1874, %.1873 + %.1875 =l add %.1648, 1008 + %.1876 =l copy 0 + storel %.1876, %.1875 + %.1877 =l add %.1648, 1016 + %.1878 =l copy 18446744073709551608 + storel %.1878, %.1877 + %.1879 =l add %.1648, 1024 + %.1880 =l copy 1 + storel %.1880, %.1879 
+ %.1881 =l add %.1648, 1032 + %.1882 =l copy 0 + storel %.1882, %.1881 + %.1883 =l add %.1648, 1040 + %.1884 =l copy 876013142962575738 + storel %.1884, %.1883 + %.1885 =l add %.1648, 1048 + storel 14224845232216782397, %.1885 + %.1886 =l add %.1648, 1056 + %.1887 =l copy 4052120349730717228 + storel %.1887, %.1886 + %.1888 =l add %.1648, 1064 + storel 16175365243520763722, %.1888 + %.1889 =l add %.1648, 1072 + %.1890 =l copy 1 + storel %.1890, %.1889 + %.1891 =l add %.1648, 1080 + %.1892 =l copy 1 + storel %.1892, %.1891 + %.1893 =l add %.1648, 1088 + %.1894 =l copy 18446744073709551606 + storel %.1894, %.1893 + %.1895 =l add %.1648, 1096 + %.1896 =l copy 0 + storel %.1896, %.1895 + %.1897 =l add %.1648, 1104 + storel 14234092197388013524, %.1897 + %.1898 =l add %.1648, 1112 + %.1899 =l copy 18446744073709551615 + storel %.1899, %.1898 + %.1900 =l add %.1648, 1120 + %.1901 =l copy 0 + storel %.1901, %.1900 + %.1902 =l add %.1648, 1128 + %.1903 =l copy 2 + storel %.1903, %.1902 + %.1904 =l add %.1648, 1136 + %.1905 =l copy 2248553449639285191 + storel %.1905, %.1904 + %.1906 =l add %.1648, 1144 + storel 10372949673387309524, %.1906 + %.1907 =l add %.1648, 1152 + storel 12201917979609006375, %.1907 + %.1908 =l add %.1648, 1160 + %.1909 =l copy 18446744073709551606 + storel %.1909, %.1908 + %.1910 =l add %.1648, 1168 + storel 18269964541825259806, %.1910 + %.1911 =l add %.1648, 1176 + %.1912 =l copy 2875883040891070095 + storel %.1912, %.1911 + %.1913 =l add %.1648, 1184 + %.1914 =l copy 7 + storel %.1914, %.1913 + %.1915 =l add %.1648, 1192 + %.1916 =l copy 7 + storel %.1916, %.1915 + %.1917 =l add %.1648, 1200 + %.1918 =l copy 0 + storel %.1918, %.1917 + %.1919 =l add %.1648, 1208 + %.1920 =l copy 18446744073709551615 + storel %.1920, %.1919 + %.1921 =l add %.1648, 1216 + %.1922 =l copy 0 + storel %.1922, %.1921 + %.1923 =l add %.1648, 1224 + %.1924 =l copy 0 + storel %.1924, %.1923 + %.1925 =l add %.1648, 1232 + storel 14583114485114116895, %.1925 + %.1926 =l add 
%.1648, 1240 + %.1927 =l copy 1 + storel %.1927, %.1926 + %.1928 =l add %.1648, 1248 + %.1929 =l copy 0 + storel %.1929, %.1928 + %.1930 =l add %.1648, 1256 + %.1931 =l copy 1 + storel %.1931, %.1930 + %.1932 =l add %.1648, 1264 + %.1933 =l copy 18446744073709551606 + storel %.1933, %.1932 + %.1934 =l add %.1648, 1272 + %.1935 =l copy 8230877399174301244 + storel %.1935, %.1934 + %.1936 =l add %.1648, 1280 + %.1937 =l copy 876013142962575738 + storel %.1937, %.1936 + %.1938 =l add %.1648, 1288 + storel 10372949673387309524, %.1938 + %.1939 =l add %.1648, 1296 + %.1940 =l copy 6007172698835695880 + storel %.1940, %.1939 + %.1941 =l add %.1648, 1304 + storel 14991488133450330097, %.1941 + %.1942 =l add %.1648, 1312 + %.1943 =l copy 0 + storel %.1943, %.1942 + %.1944 =l add %.1648, 1320 + %.1945 =l copy 5937592181530390446 + storel %.1945, %.1944 + %.1946 =l add %.1648, 1328 + %.1947 =l copy 8230877399174301244 + storel %.1947, %.1946 + %.1948 =l add %.1648, 1336 + %.1949 =l copy 1 + storel %.1949, %.1948 + %.1950 =l add %.1648, 1344 + %.1951 =l copy 8230877399174301244 + storel %.1951, %.1950 + %.1952 =l add %.1648, 1352 + %.1953 =l copy 5937592181530390446 + storel %.1953, %.1952 + %.1954 =l add %.1648, 1360 + %.1955 =l copy 1 + storel %.1955, %.1954 + %.1956 =l add %.1648, 1368 + storel 15873037008906187302, %.1956 + %.1957 =l add %.1648, 1376 + %.1958 =l copy 0 + storel %.1958, %.1957 + %.1959 =l add %.1648, 1384 + %.1960 =l copy 2 + storel %.1960, %.1959 + %.1961 =l add %.1648, 1392 + %.1962 =l copy 2248553449639285191 + storel %.1962, %.1961 + %.1963 =l add %.1648, 1400 + storel 14234092197388013524, %.1963 + %.1964 =l add %.1648, 1408 + %.1965 =l copy 18446744073709551615 + storel %.1965, %.1964 + %.1966 =l add %.1648, 1416 + %.1967 =l copy 2875883040891070095 + storel %.1967, %.1966 + %.1968 =l add %.1648, 1424 + %.1969 =l copy 18446744073709551607 + storel %.1969, %.1968 + %.1970 =l add %.1648, 1432 + %.1971 =l copy 18446744073709551608 + storel %.1971, 
%.1970 + %.1972 =l add %.1648, 1440 + %.1973 =l copy 7 + storel %.1973, %.1972 + %.1974 =l add %.1648, 1448 + %.1975 =l copy 18446744073709551611 + storel %.1975, %.1974 + %.1976 =l add %.1648, 1456 + %.1977 =l copy 2248553449639285191 + storel %.1977, %.1976 + %.1978 =l add %.1648, 1464 + storel 15873037008906187302, %.1978 + %.1979 =l add %.1648, 1472 + %.1980 =l copy 2248553449639285191 + storel %.1980, %.1979 + %.1981 =l add %.1648, 1480 + %.1982 =l copy 18446744073709551607 + storel %.1982, %.1981 + %.1983 =l add %.1648, 1488 + %.1984 =l copy 18446744073709551607 + storel %.1984, %.1983 + %.1985 =l add %.1648, 1496 + %.1986 =l copy 0 + storel %.1986, %.1985 + %.1987 =l add %.1648, 1504 + %.1988 =l copy 2875883040891070095 + storel %.1988, %.1987 + %.1989 =l add %.1648, 1512 + %.1990 =l copy 5937592181530390446 + storel %.1990, %.1989 + %.1991 =l add %.1648, 1520 + %.1992 =l copy 2248553449639285191 + storel %.1992, %.1991 + %.1993 =l add %.1648, 1528 + %.1994 =l copy 18446744073709551610 + storel %.1994, %.1993 + %.1995 =l add %.1648, 1536 + %.1996 =l copy 4052120349730717228 + storel %.1996, %.1995 + %.1997 =l add %.1648, 1544 + %.1998 =l copy 18446744073709551607 + storel %.1998, %.1997 + %.1999 =l add %.1648, 1552 + %.2000 =l copy 0 + storel %.2000, %.1999 + %.2001 =l add %.1648, 1560 + %.2002 =l copy 1 + storel %.2002, %.2001 + %.2003 =l add %.1648, 1568 + %.2004 =l copy 18446744073709551615 + storel %.2004, %.2003 + %.2005 =l add %.1648, 1576 + %.2006 =l copy 18446744073709551608 + storel %.2006, %.2005 + %.2007 =l add %.1648, 1584 + storel 18269964541825259806, %.2007 + %.2008 =l add %.1648, 1592 + %.2009 =l copy 8230877399174301244 + storel %.2009, %.2008 + %.2011 =l add %.2010, 0 + %.2012 =w copy 886398557 + storew %.2012, %.2011 + storew 0, %.2013 +@for_cond.712 + %.2016 =w loadsw %.2013 + %.2017 =w csltw %.2016, 1 + jnz %.2017, @for_body.713, @for_join.715 +@for_body.713 + %.2018 =w copy 1 + %.2019 =w loadsw %.2013 + %.2020 =l extsw %.2019 + %.2021 
=l mul %.2020, 4 + %.2022 =l add %.184, %.2021 + storew %.2018, %.2022 +@for_cont.714 + %.2023 =w loadsw %.2013 + %.2024 =w add %.2023, 1 + storew %.2024, %.2013 + jmp @for_cond.712 +@for_join.715 + storew 0, %.2013 +@for_cond.716 + %.2025 =w loadsw %.2013 + %.2026 =w csltw %.2025, 7 + jnz %.2026, @for_body.717, @for_join.719 +@for_body.717 + %.2027 =l copy $g_185 + %.2028 =l mul 16, 1 + %.2029 =l add %.2027, %.2028 + %.2030 =l copy %.2029 + %.2031 =w loadsw %.2013 + %.2032 =l extsw %.2031 + %.2033 =l mul %.2032, 8 + %.2034 =l add %.308, %.2033 + storel %.2030, %.2034 +@for_cont.718 + %.2035 =w loadsw %.2013 + %.2036 =w add %.2035, 1 + storew %.2036, %.2013 + jmp @for_cond.716 +@for_join.719 + storew 0, %.2013 +@for_cond.720 + %.2037 =w loadsw %.2013 + %.2038 =w csltw %.2037, 2 + jnz %.2038, @for_body.721, @for_join.723 +@for_body.721 + %.2039 =w copy 2935257452 + %.2040 =w loadsw %.2013 + %.2041 =l extsw %.2040 + %.2042 =l mul %.2041, 4 + %.2043 =l add %.309, %.2042 + storew %.2039, %.2043 +@for_cont.722 + %.2044 =w loadsw %.2013 + %.2045 =w add %.2044, 1 + storew %.2045, %.2013 + jmp @for_cond.720 +@for_join.723 + %.2046 =w copy 0 + storeb %.2046, $g_2 +@for_cond.724 + %.2047 =w loadsb $g_2 + %.2048 =w extsb %.2047 + %.2049 =w cslew %.2048, 5 + jnz %.2049, @for_body.725, @for_join.727 +@for_body.725 + %.2052 =l add %.2051, 0 + storel %.7, %.2052 + %.2054 =l add %.2053, 0 + %.2055 =l copy $g_265 + %.2056 =l mul 24, 1 + %.2057 =l add %.2055, %.2056 + %.2058 =l copy %.2057 + storel %.2058, %.2054 + %.2060 =l add %.2059, 0 + %.2061 =l copy 1 + storel %.2061, %.2060 + %.2063 =l add %.2062, 0 + %.2064 =l extsw 0 + %.2065 =l sub %.2064, 6 + %.2066 =w copy %.2065 + storeh %.2066, %.2063 + %.2068 =l add %.2067, 0 + storel $g_1476, %.2068 + %.2069 =l add %.2067, 8 + storel $g_1476, %.2069 + %.2070 =l add %.2067, 16 + storel $g_1476, %.2070 + %.2071 =l add %.2067, 24 + storel $g_1476, %.2071 + %.2072 =l add %.2067, 32 + storel $g_1476, %.2072 + %.2073 =l add %.2067, 40 + 
storel $g_1476, %.2073 + %.2074 =l add %.2067, 48 + storel $g_1476, %.2074 + %.2075 =l add %.2067, 56 + storel $g_1476, %.2075 + %.2077 =l add %.2076, 0 + %.2078 =w copy 18446744073709551615 + storew %.2078, %.2077 + %.2079 =l add %.2076, 4 + %.2080 =w copy 621699884 + storew %.2080, %.2079 + %.2081 =l add %.2076, 8 + %.2082 =w copy 3733628126 + storew %.2082, %.2081 + %.2083 =l add %.2076, 12 + %.2084 =w copy 1999332396 + storew %.2084, %.2083 + %.2085 =l add %.2076, 16 + %.2086 =w copy 8 + storew %.2086, %.2085 + %.2087 =l add %.2076, 20 + %.2088 =w copy 1999332396 + storew %.2088, %.2087 + %.2089 =l add %.2076, 24 + %.2090 =w copy 18446744073709551615 + storew %.2090, %.2089 + %.2091 =l add %.2076, 28 + %.2092 =w copy 7 + storew %.2092, %.2091 + %.2093 =l add %.2076, 32 + %.2094 =w copy 18446744073709551615 + storew %.2094, %.2093 + %.2095 =l add %.2076, 36 + %.2096 =w copy 621699884 + storew %.2096, %.2095 + %.2097 =l add %.2076, 40 + %.2098 =w copy 18446744073709551615 + storew %.2098, %.2097 + %.2099 =l add %.2076, 44 + %.2100 =w copy 18446744073709551606 + storew %.2100, %.2099 + %.2101 =l add %.2076, 48 + %.2102 =w copy 3733628126 + storew %.2102, %.2101 + %.2103 =l add %.2076, 52 + %.2104 =w copy 621699884 + storew %.2104, %.2103 + %.2105 =l add %.2076, 56 + %.2106 =w copy 18446744073709551615 + storew %.2106, %.2105 + %.2107 =l add %.2076, 60 + %.2108 =w copy 7 + storew %.2108, %.2107 + %.2109 =l add %.2076, 64 + %.2110 =w copy 18446744073709551615 + storew %.2110, %.2109 + %.2111 =l add %.2076, 68 + %.2112 =w copy 621699884 + storew %.2112, %.2111 + %.2113 =l add %.2076, 72 + %.2114 =w copy 18446744073709551615 + storew %.2114, %.2113 + %.2115 =l add %.2076, 76 + %.2116 =w copy 3827321299 + storew %.2116, %.2115 + %.2117 =l add %.2076, 80 + %.2118 =w copy 8 + storew %.2118, %.2117 + %.2119 =l add %.2076, 84 + %.2120 =w copy 7 + storew %.2120, %.2119 + %.2121 =l add %.2076, 88 + %.2122 =w copy 1116279750 + storew %.2122, %.2121 + %.2123 =l add %.2076, 92 
+ %.2124 =w copy 1999332396 + storew %.2124, %.2123 + %.2125 =l add %.2076, 96 + %.2126 =w copy 3733628126 + storew %.2126, %.2125 + %.2127 =l add %.2076, 100 + %.2128 =w copy 1999332396 + storew %.2128, %.2127 + %.2129 =l add %.2076, 104 + %.2130 =w copy 8 + storew %.2130, %.2129 + %.2131 =l add %.2076, 108 + %.2132 =w copy 1999332396 + storew %.2132, %.2131 + %.2133 =l add %.2076, 112 + %.2134 =w copy 3733628126 + storew %.2134, %.2133 + %.2135 =l add %.2076, 116 + %.2136 =w copy 621699884 + storew %.2136, %.2135 + %.2137 =l add %.2076, 120 + %.2138 =w copy 18446744073709551611 + storew %.2138, %.2137 + %.2139 =l add %.2076, 124 + %.2140 =w copy 1999332396 + storew %.2140, %.2139 + %.2141 =l add %.2076, 128 + %.2142 =w copy 18446744073709551615 + storew %.2142, %.2141 + %.2143 =l add %.2076, 132 + %.2144 =w copy 18446744073709551606 + storew %.2144, %.2143 + %.2145 =l add %.2076, 136 + %.2146 =w copy 1116279750 + storew %.2146, %.2145 + %.2147 =l add %.2076, 140 + %.2148 =w copy 18446744073709551606 + storew %.2148, %.2147 + %.2149 =l add %.2076, 144 + %.2150 =w copy 18446744073709551611 + storew %.2150, %.2149 + %.2151 =l add %.2076, 148 + %.2152 =w copy 3827321299 + storew %.2152, %.2151 + %.2153 =l add %.2076, 152 + %.2154 =w copy 18446744073709551611 + storew %.2154, %.2153 + %.2155 =l add %.2076, 156 + %.2156 =w copy 1999332396 + storew %.2156, %.2155 + %.2157 =l add %.2076, 160 + %.2158 =w copy 18446744073709551615 + storew %.2158, %.2157 + %.2159 =l add %.2076, 164 + %.2160 =w copy 18446744073709551606 + storew %.2160, %.2159 + %.2161 =l add %.2076, 168 + %.2162 =w copy 3733628126 + storew %.2162, %.2161 + %.2163 =l add %.2076, 172 + %.2164 =w copy 621699884 + storew %.2164, %.2163 + %.2165 =l add %.2076, 176 + %.2166 =w copy 18446744073709551615 + storew %.2166, %.2165 + %.2167 =l add %.2076, 180 + %.2168 =w copy 7 + storew %.2168, %.2167 + %.2169 =l add %.2076, 184 + %.2170 =w copy 18446744073709551615 + storew %.2170, %.2169 + %.2171 =l add %.2076, 188 
+ %.2172 =w copy 621699884 + storew %.2172, %.2171 + %.2173 =l add %.2076, 192 + %.2174 =w copy 18446744073709551615 + storew %.2174, %.2173 + %.2175 =l add %.2076, 196 + %.2176 =w copy 3827321299 + storew %.2176, %.2175 + %.2177 =l add %.2076, 200 + %.2178 =w copy 8 + storew %.2178, %.2177 + %.2179 =l add %.2076, 204 + %.2180 =w copy 7 + storew %.2180, %.2179 + %.2181 =l add %.2076, 208 + %.2182 =w copy 1116279750 + storew %.2182, %.2181 + %.2183 =l add %.2076, 212 + %.2184 =w copy 1999332396 + storew %.2184, %.2183 + storew 0, %.2185 +@for_cond.728 + %.2187 =w loadsw %.2185 + %.2188 =w csltw %.2187, 5 + jnz %.2188, @for_body.729, @for_join.731 +@for_body.729 + %.2189 =w loadsw %.2185 + %.2190 =l extsw %.2189 + %.2191 =l mul %.2190, 8 + %.2192 =l add %.2050, %.2191 + storel $g_23, %.2192 +@for_cont.730 + %.2193 =w loadsw %.2185 + %.2194 =w add %.2193, 1 + storew %.2194, %.2185 + jmp @for_cond.728 +@for_join.731 + %.2195 =w loadsb $g_2 + %.2196 =l extsb %.2195 + %.2197 =l mul %.2196, 4 + %.2198 =l add %.154, %.2197 + %.2199 =w loaduw %.2198 + %.2200 =w copy %.2199 + %.2201 =l call $func_8(w %.2200) + storel %.2201, %.1 + %.2202 =l loadl %.167 + %.2203 =l loadl %.167 + %.2204 =l call $func_4(l %.2201, l %.2202, l %.2203) + storel %.2204, %.167 + %.2205 =l loadl %.1 + storel %.2205, %.167 + %.2206 =w loadsb $g_2 + %.2207 =l extsb %.2206 + %.2208 =l mul %.2207, 4 + %.2209 =l add %.154, %.2208 + %.2210 =w loaduw %.2209 + %.2211 =l loadl %.9 + storel %.2211, %.2051 + %.2212 =l loadl %.173 + storel %.2212, %.11 + %.2213 =w cnel %.2211, %.2212 + %.2214 =l extsw %.2213 + %.2215 =l loadl %.2053 + storel %.2214, %.2215 + %.2216 =l copy 3872474516526135072 + %.2217 =l and %.2214, %.2216 + %.2218 =w cnel %.2217, 0 + jnz %.2218, @if_true.732, @if_false.733 +@if_true.732 + %.2220 =l add %.2219, 0 + %.2221 =w copy 29657 + storeh %.2221, %.2220 + %.2222 =l add %.2219, 2 + %.2223 =w copy 5 + storeh %.2223, %.2222 + %.2224 =l add %.2219, 4 + %.2225 =w copy 65535 + storeh %.2225, 
%.2224 + %.2226 =l add %.2219, 6 + %.2227 =w copy 11174 + storeh %.2227, %.2226 + %.2228 =l add %.2219, 8 + %.2229 =w copy 17984 + storeh %.2229, %.2228 + %.2230 =l add %.2219, 10 + %.2231 =w copy 17984 + storeh %.2231, %.2230 + %.2232 =l add %.2219, 12 + %.2233 =w copy 11174 + storeh %.2233, %.2232 + %.2234 =l add %.2219, 14 + %.2235 =w copy 28699 + storeh %.2235, %.2234 + %.2236 =l add %.2219, 16 + %.2237 =w copy 65532 + storeh %.2237, %.2236 + %.2238 =l add %.2219, 18 + %.2239 =w copy 28699 + storeh %.2239, %.2238 + %.2240 =l add %.2219, 20 + %.2241 =w copy 65529 + storeh %.2241, %.2240 + %.2242 =l add %.2219, 22 + %.2243 =w copy 0 + storeh %.2243, %.2242 + %.2244 =l add %.2219, 24 + %.2245 =w copy 9905 + storeh %.2245, %.2244 + %.2246 =l add %.2219, 26 + %.2247 =w copy 2665 + storeh %.2247, %.2246 + %.2248 =l add %.2219, 28 + %.2249 =w copy 42935 + storeh %.2249, %.2248 + %.2250 =l add %.2219, 30 + %.2251 =w copy 6 + storeh %.2251, %.2250 + %.2252 =l add %.2219, 32 + %.2253 =w copy 11174 + storeh %.2253, %.2252 + %.2254 =l add %.2219, 34 + %.2255 =w copy 5 + storeh %.2255, %.2254 + %.2256 =l add %.2219, 36 + %.2257 =w copy 8560 + storeh %.2257, %.2256 + %.2258 =l add %.2219, 38 + %.2259 =w copy 0 + storeh %.2259, %.2258 + %.2260 =l add %.2219, 40 + %.2261 =w copy 29657 + storeh %.2261, %.2260 + %.2262 =l add %.2219, 42 + %.2263 =w copy 9905 + storeh %.2263, %.2262 + %.2264 =l add %.2219, 44 + %.2265 =w copy 65530 + storeh %.2265, %.2264 + %.2266 =l add %.2219, 46 + %.2267 =w copy 20681 + storeh %.2267, %.2266 + %.2268 =l add %.2219, 48 + %.2269 =w copy 20681 + storeh %.2269, %.2268 + %.2270 =l add %.2219, 50 + %.2271 =w copy 65530 + storeh %.2271, %.2270 + %.2272 =l add %.2219, 52 + %.2273 =w copy 9905 + storeh %.2273, %.2272 + %.2274 =l add %.2219, 54 + %.2275 =w copy 1 + storeh %.2275, %.2274 + %.2276 =l add %.2219, 56 + %.2277 =w copy 1 + storeh %.2277, %.2276 + %.2278 =l add %.2219, 58 + %.2279 =w copy 11174 + storeh %.2279, %.2278 + %.2280 =l add %.2219, 
60 + %.2281 =w copy 8560 + storeh %.2281, %.2280 + %.2282 =l add %.2219, 62 + %.2283 =w copy 1 + storeh %.2283, %.2282 + %.2284 =l add %.2219, 64 + %.2285 =w copy 65535 + storeh %.2285, %.2284 + %.2286 =l add %.2219, 66 + %.2287 =w copy 17984 + storeh %.2287, %.2286 + %.2288 =l add %.2219, 68 + %.2289 =w copy 39046 + storeh %.2289, %.2288 + %.2290 =l add %.2219, 70 + %.2291 =w copy 8 + storeh %.2291, %.2290 + %.2292 =l add %.2219, 72 + %.2293 =w copy 9905 + storeh %.2293, %.2292 + %.2294 =l add %.2219, 74 + %.2295 =w copy 7040 + storeh %.2295, %.2294 + %.2296 =l add %.2219, 76 + %.2297 =w copy 2665 + storeh %.2297, %.2296 + %.2298 =l add %.2219, 78 + %.2299 =w copy 54886 + storeh %.2299, %.2298 + %.2300 =l add %.2219, 80 + %.2301 =w copy 2665 + storeh %.2301, %.2300 + %.2302 =l add %.2219, 82 + %.2303 =w copy 7040 + storeh %.2303, %.2302 + %.2304 =l add %.2219, 84 + %.2305 =w copy 11174 + storeh %.2305, %.2304 + %.2306 =l add %.2219, 86 + %.2307 =w copy 11174 + storeh %.2307, %.2306 + %.2308 =l add %.2219, 88 + %.2309 =w copy 0 + storeh %.2309, %.2308 + %.2310 =l add %.2219, 90 + %.2311 =w copy 3 + storeh %.2311, %.2310 + %.2312 =l add %.2219, 92 + %.2313 =w copy 1 + storeh %.2313, %.2312 + %.2314 =l add %.2219, 94 + %.2315 =w copy 42935 + storeh %.2315, %.2314 + %.2316 =l add %.2219, 96 + %.2317 =w copy 50276 + storeh %.2317, %.2316 + %.2318 =l add %.2219, 98 + %.2319 =w copy 1 + storeh %.2319, %.2318 + %.2320 =l add %.2219, 100 + %.2321 =w copy 65530 + storeh %.2321, %.2320 + %.2322 =l add %.2219, 102 + %.2323 =w copy 65529 + storeh %.2323, %.2322 + %.2324 =l add %.2219, 104 + %.2325 =w copy 6129 + storeh %.2325, %.2324 + %.2326 =l add %.2219, 106 + %.2327 =w copy 7040 + storeh %.2327, %.2326 + %.2328 =l add %.2219, 108 + %.2329 =w copy 0 + storeh %.2329, %.2328 + %.2330 =l add %.2219, 110 + %.2331 =w copy 0 + storeh %.2331, %.2330 + %.2332 =l add %.2219, 112 + %.2333 =w copy 1 + storeh %.2333, %.2332 + %.2334 =l add %.2219, 114 + %.2335 =w copy 6 + storeh 
%.2335, %.2334 + %.2336 =l add %.2219, 116 + %.2337 =w copy 8 + storeh %.2337, %.2336 + %.2338 =l add %.2219, 118 + %.2339 =w copy 6 + storeh %.2339, %.2338 + %.2340 =l add %.2219, 120 + %.2341 =w copy 1 + storeh %.2341, %.2340 + %.2342 =l add %.2219, 122 + %.2343 =w copy 11174 + storeh %.2343, %.2342 + %.2344 =l add %.2219, 124 + %.2345 =w copy 34633 + storeh %.2345, %.2344 + %.2346 =l add %.2219, 126 + %.2347 =w copy 58382 + storeh %.2347, %.2346 + %.2348 =l add %.2219, 128 + %.2349 =w copy 65532 + storeh %.2349, %.2348 + %.2350 =l add %.2219, 130 + %.2351 =w copy 8 + storeh %.2351, %.2350 + %.2352 =l add %.2219, 132 + %.2353 =w copy 39628 + storeh %.2353, %.2352 + %.2354 =l add %.2219, 134 + %.2355 =w copy 54886 + storeh %.2355, %.2354 + %.2356 =l add %.2219, 136 + %.2357 =w copy 4 + storeh %.2357, %.2356 + %.2358 =l add %.2219, 138 + %.2359 =w copy 9905 + storeh %.2359, %.2358 + %.2361 =l add %.2360, 0 + %.2362 =w copy 511172155 + storew %.2362, %.2361 + %.2364 =l add %.2363, 0 + %.2365 =w copy 2207426902 + storew %.2365, %.2364 + %.2367 =l add %.2366, 0 + %.2368 =l extsw 0 + %.2369 =l sub %.2368, 1 + %.2370 =w copy %.2369 + storew %.2370, %.2367 + %.2372 =l add %.2371, 0 + %.2373 =w copy 3215778575 + storew %.2373, %.2372 + %.2374 =l add %.2371, 4 + %.2375 =w copy 3428235063 + storew %.2375, %.2374 + %.2376 =l add %.2371, 8 + %.2377 =w copy 3215778575 + storew %.2377, %.2376 + %.2378 =l add %.2371, 12 + %.2379 =w copy 3428235063 + storew %.2379, %.2378 + %.2380 =l add %.2371, 16 + %.2381 =w copy 3215778575 + storew %.2381, %.2380 + %.2382 =l add %.2371, 20 + %.2383 =w copy 3428235063 + storew %.2383, %.2382 + %.2384 =l add %.2371, 24 + %.2385 =w copy 3215778575 + storew %.2385, %.2384 + %.2386 =l add %.2371, 28 + %.2387 =w copy 3428235063 + storew %.2387, %.2386 + %.2388 =l add %.2371, 32 + %.2389 =w copy 3215778575 + storew %.2389, %.2388 + %.2390 =l add %.2371, 36 + %.2391 =w copy 3428235063 + storew %.2391, %.2390 + %.2392 =l add %.2371, 40 + %.2393 =w copy 
3215778575 + storew %.2393, %.2392 + %.2394 =l add %.2371, 44 + %.2395 =w copy 3428235063 + storew %.2395, %.2394 + %.2396 =l add %.2371, 48 + %.2397 =w copy 3215778575 + storew %.2397, %.2396 + %.2398 =l add %.2371, 52 + %.2399 =w copy 3428235063 + storew %.2399, %.2398 + %.2400 =l add %.2371, 56 + %.2401 =w copy 3215778575 + storew %.2401, %.2400 + %.2402 =l add %.2371, 60 + %.2403 =w copy 3428235063 + storew %.2403, %.2402 + %.2404 =l add %.2371, 64 + %.2405 =w copy 3215778575 + storew %.2405, %.2404 + %.2406 =l add %.2371, 68 + %.2407 =w copy 3428235063 + storew %.2407, %.2406 + %.2408 =l add %.2371, 72 + %.2409 =w copy 3215778575 + storew %.2409, %.2408 + %.2410 =l add %.2371, 76 + %.2411 =w copy 3428235063 + storew %.2411, %.2410 + %.2412 =l add %.2371, 80 + %.2413 =w copy 3215778575 + storew %.2413, %.2412 + %.2414 =l add %.2371, 84 + %.2415 =w copy 3428235063 + storew %.2415, %.2414 + %.2416 =l add %.2371, 88 + %.2417 =w copy 3215778575 + storew %.2417, %.2416 + %.2418 =l add %.2371, 92 + %.2419 =w copy 3428235063 + storew %.2419, %.2418 + %.2420 =l add %.2371, 96 + %.2421 =w copy 3215778575 + storew %.2421, %.2420 + %.2422 =l add %.2371, 100 + %.2423 =w copy 3428235063 + storew %.2423, %.2422 + %.2424 =l add %.2371, 104 + %.2425 =w copy 3215778575 + storew %.2425, %.2424 + %.2426 =l add %.2371, 108 + %.2427 =w copy 3428235063 + storew %.2427, %.2426 + %.2428 =l add %.2371, 112 + %.2429 =w copy 3215778575 + storew %.2429, %.2428 + %.2430 =l add %.2371, 116 + %.2431 =w copy 3428235063 + storew %.2431, %.2430 + %.2432 =l add %.2371, 120 + %.2433 =w copy 3215778575 + storew %.2433, %.2432 + %.2434 =l add %.2371, 124 + %.2435 =w copy 3428235063 + storew %.2435, %.2434 + %.2436 =l add %.2371, 128 + %.2437 =w copy 3215778575 + storew %.2437, %.2436 + %.2438 =l add %.2371, 132 + %.2439 =w copy 3428235063 + storew %.2439, %.2438 + %.2440 =l add %.2371, 136 + %.2441 =w copy 3215778575 + storew %.2441, %.2440 + %.2442 =l add %.2371, 140 + %.2443 =w copy 3428235063 + 
storew %.2443, %.2442 + %.2444 =l add %.2371, 144 + %.2445 =w copy 3215778575 + storew %.2445, %.2444 + %.2446 =l add %.2371, 148 + %.2447 =w copy 3428235063 + storew %.2447, %.2446 + %.2448 =l add %.2371, 152 + %.2449 =w copy 3215778575 + storew %.2449, %.2448 + %.2450 =l add %.2371, 156 + %.2451 =w copy 3428235063 + storew %.2451, %.2450 + %.2452 =l add %.2371, 160 + %.2453 =w copy 3215778575 + storew %.2453, %.2452 + %.2454 =l add %.2371, 164 + %.2455 =w copy 3428235063 + storew %.2455, %.2454 + %.2456 =l add %.2371, 168 + %.2457 =w copy 3215778575 + storew %.2457, %.2456 + %.2458 =l add %.2371, 172 + %.2459 =w copy 3428235063 + storew %.2459, %.2458 + %.2460 =l add %.2371, 176 + %.2461 =w copy 3215778575 + storew %.2461, %.2460 + %.2462 =l add %.2371, 180 + %.2463 =w copy 3428235063 + storew %.2463, %.2462 + %.2464 =l add %.2371, 184 + %.2465 =w copy 3215778575 + storew %.2465, %.2464 + %.2466 =l add %.2371, 188 + %.2467 =w copy 3428235063 + storew %.2467, %.2466 + %.2471 =w copy 1 + storew %.2471, $g_84 +@for_cond.734 + %.2472 =w loaduw $g_84 + %.2473 =w copy 5 + %.2474 =w culew %.2472, %.2473 + jnz %.2474, @for_body.735, @for_join.737 +@for_body.735 + %.2476 =l add %.2475, 0 + %.2477 =l copy $g_1183 + %.2478 =l mul 0, 1 + %.2479 =l add %.2477, %.2478 + %.2480 =l copy %.2479 + storel %.2480, %.2476 + %.2481 =l add %.2475, 8 + %.2482 =l copy $g_1183 + %.2483 =l mul 0, 1 + %.2484 =l add %.2482, %.2483 + %.2485 =l copy %.2484 + storel %.2485, %.2481 + %.2486 =l add %.2475, 16 + storel $g_566, %.2486 + %.2487 =l add %.2475, 24 + %.2488 =l copy $g_1183 + %.2489 =l mul 0, 1 + %.2490 =l add %.2488, %.2489 + %.2491 =l copy %.2490 + storel %.2491, %.2487 + %.2492 =l add %.2475, 32 + %.2493 =l extsw 0 + %.2494 =l copy %.2493 + storel %.2494, %.2492 + %.2495 =l add %.2475, 40 + %.2496 =l extsw 0 + %.2497 =l copy %.2496 + storel %.2497, %.2495 + %.2498 =l add %.2475, 48 + %.2499 =l copy $g_518 + %.2500 =l mul 0, 1 + %.2501 =l add %.2499, %.2500 + %.2502 =l copy %.2501 + 
storel %.2502, %.2498 + %.2503 =l add %.2475, 56 + %.2504 =l extsw 0 + %.2505 =l copy %.2504 + storel %.2505, %.2503 + %.2506 =l add %.2475, 64 + %.2507 =l copy $g_265 + %.2508 =l mul 0, 1 + %.2509 =l add %.2507, %.2508 + %.2510 =l copy %.2509 + storel %.2510, %.2506 + %.2511 =l add %.2475, 72 + %.2512 =l copy $g_265 + %.2513 =l mul 0, 1 + %.2514 =l add %.2512, %.2513 + %.2515 =l copy %.2514 + storel %.2515, %.2511 + %.2516 =l add %.2475, 80 + %.2517 =l copy $g_518 + %.2518 =l mul 0, 1 + %.2519 =l add %.2517, %.2518 + %.2520 =l copy %.2519 + storel %.2520, %.2516 + %.2521 =l add %.2475, 88 + %.2522 =l copy $g_1183 + %.2523 =l mul 0, 1 + %.2524 =l add %.2522, %.2523 + %.2525 =l copy %.2524 + storel %.2525, %.2521 + %.2526 =l add %.2475, 96 + %.2527 =l copy $g_1183 + %.2528 =l mul 0, 1 + %.2529 =l add %.2527, %.2528 + %.2530 =l copy %.2529 + storel %.2530, %.2526 + %.2531 =l add %.2475, 104 + storel $g_566, %.2531 + %.2532 =l add %.2475, 112 + %.2533 =l copy $g_518 + %.2534 =l mul 0, 1 + %.2535 =l add %.2533, %.2534 + %.2536 =l copy %.2535 + storel %.2536, %.2532 + %.2537 =l add %.2475, 120 + %.2538 =l copy $g_1183 + %.2539 =l mul 0, 1 + %.2540 =l add %.2538, %.2539 + %.2541 =l copy %.2540 + storel %.2541, %.2537 + %.2542 =l add %.2475, 128 + %.2543 =l copy $g_265 + %.2544 =l mul 0, 1 + %.2545 =l add %.2543, %.2544 + %.2546 =l copy %.2545 + storel %.2546, %.2542 + %.2547 =l add %.2475, 136 + storel $g_566, %.2547 + %.2548 =l add %.2475, 144 + storel $g_46, %.2548 + %.2549 =l add %.2475, 152 + %.2550 =l extsw 0 + %.2551 =l copy %.2550 + storel %.2551, %.2549 + %.2552 =l add %.2475, 160 + %.2553 =l copy $g_1183 + %.2554 =l mul 0, 1 + %.2555 =l add %.2553, %.2554 + %.2556 =l copy %.2555 + storel %.2556, %.2552 + %.2557 =l add %.2475, 168 + storel $g_566, %.2557 + %.2558 =l add %.2475, 176 + storel $g_46, %.2558 + %.2559 =l add %.2475, 184 + %.2560 =l extsw 0 + %.2561 =l copy %.2560 + storel %.2561, %.2559 + %.2562 =l add %.2475, 192 + storel $g_566, %.2562 + %.2563 =l 
add %.2475, 200 + %.2564 =l extsw 0 + %.2565 =l copy %.2564 + storel %.2565, %.2563 + %.2566 =l add %.2475, 208 + %.2567 =l copy $g_265 + %.2568 =l mul 0, 1 + %.2569 =l add %.2567, %.2568 + %.2570 =l copy %.2569 + storel %.2570, %.2566 + %.2571 =l add %.2475, 216 + %.2572 =l copy $g_185 + %.2573 =l mul 0, 1 + %.2574 =l add %.2572, %.2573 + %.2575 =l copy %.2574 + storel %.2575, %.2571 + %.2576 =l add %.2475, 224 + storel $g_57, %.2576 + %.2577 =l add %.2475, 232 + %.2578 =l extsw 0 + %.2579 =l copy %.2578 + storel %.2579, %.2577 + %.2580 =l add %.2475, 240 + %.2581 =l extsw 0 + %.2582 =l copy %.2581 + storel %.2582, %.2580 + %.2583 =l add %.2475, 248 + %.2584 =l extsw 0 + %.2585 =l copy %.2584 + storel %.2585, %.2583 + %.2586 =l add %.2475, 256 + %.2587 =l copy $g_1183 + %.2588 =l mul 0, 1 + %.2589 =l add %.2587, %.2588 + %.2590 =l copy %.2589 + storel %.2590, %.2586 + %.2591 =l add %.2475, 264 + %.2592 =l copy $g_265 + %.2593 =l mul 0, 1 + %.2594 =l add %.2592, %.2593 + %.2595 =l copy %.2594 + storel %.2595, %.2591 + %.2596 =l add %.2475, 272 + %.2597 =l copy $g_265 + %.2598 =l mul 0, 1 + %.2599 =l add %.2597, %.2598 + %.2600 =l copy %.2599 + storel %.2600, %.2596 + %.2601 =l add %.2475, 280 + storel $g_566, %.2601 + %.2602 =l add %.2475, 288 + %.2603 =l copy $g_1183 + %.2604 =l mul 0, 1 + %.2605 =l add %.2603, %.2604 + %.2606 =l copy %.2605 + storel %.2606, %.2602 + %.2607 =l add %.2475, 296 + %.2608 =l copy $g_265 + %.2609 =l mul 0, 1 + %.2610 =l add %.2608, %.2609 + %.2611 =l copy %.2610 + storel %.2611, %.2607 + %.2612 =l add %.2475, 304 + %.2613 =l copy $g_265 + %.2614 =l mul 0, 1 + %.2615 =l add %.2613, %.2614 + %.2616 =l copy %.2615 + storel %.2616, %.2612 + %.2617 =l add %.2475, 312 + storel $g_57, %.2617 + %.2618 =l add %.2475, 320 + %.2619 =l copy $g_265 + %.2620 =l mul 0, 1 + %.2621 =l add %.2619, %.2620 + %.2622 =l copy %.2621 + storel %.2622, %.2618 + %.2623 =l add %.2475, 328 + %.2624 =l copy $g_1183 + %.2625 =l mul 0, 1 + %.2626 =l add %.2624, 
%.2625 + %.2627 =l copy %.2626 + storel %.2627, %.2623 + %.2628 =l add %.2475, 336 + storel $g_566, %.2628 + %.2629 =l add %.2475, 344 + %.2630 =l copy $g_185 + %.2631 =l mul 0, 1 + %.2632 =l add %.2630, %.2631 + %.2633 =l copy %.2632 + storel %.2633, %.2629 + %.2634 =l add %.2475, 352 + storel $g_566, %.2634 + %.2635 =l add %.2475, 360 + storel $g_46, %.2635 + %.2636 =l add %.2475, 368 + storel $g_57, %.2636 + %.2637 =l add %.2475, 376 + storel $g_566, %.2637 + %.2638 =l add %.2475, 384 + %.2639 =l copy $g_265 + %.2640 =l mul 0, 1 + %.2641 =l add %.2639, %.2640 + %.2642 =l copy %.2641 + storel %.2642, %.2638 + %.2643 =l add %.2475, 392 + storel $g_566, %.2643 + %.2644 =l add %.2475, 400 + storel $g_57, %.2644 + %.2645 =l add %.2475, 408 + storel $g_57, %.2645 + %.2646 =l add %.2475, 416 + %.2647 =l copy $g_1183 + %.2648 =l mul 0, 1 + %.2649 =l add %.2647, %.2648 + %.2650 =l copy %.2649 + storel %.2650, %.2646 + %.2651 =l add %.2475, 424 + %.2652 =l copy $g_265 + %.2653 =l mul 0, 1 + %.2654 =l add %.2652, %.2653 + %.2655 =l copy %.2654 + storel %.2655, %.2651 + %.2656 =l add %.2475, 432 + %.2657 =l copy $g_265 + %.2658 =l mul 0, 1 + %.2659 =l add %.2657, %.2658 + %.2660 =l copy %.2659 + storel %.2660, %.2656 + %.2661 =l add %.2475, 440 + %.2662 =l copy $g_518 + %.2663 =l mul 0, 1 + %.2664 =l add %.2662, %.2663 + %.2665 =l copy %.2664 + storel %.2665, %.2661 + %.2666 =l add %.2475, 448 + %.2667 =l copy $g_265 + %.2668 =l mul 0, 1 + %.2669 =l add %.2667, %.2668 + %.2670 =l copy %.2669 + storel %.2670, %.2666 + %.2671 =l add %.2475, 456 + %.2672 =l copy $g_518 + %.2673 =l mul 0, 1 + %.2674 =l add %.2672, %.2673 + %.2675 =l copy %.2674 + storel %.2675, %.2671 + %.2676 =l add %.2475, 464 + %.2677 =l copy $g_265 + %.2678 =l mul 0, 1 + %.2679 =l add %.2677, %.2678 + %.2680 =l copy %.2679 + storel %.2680, %.2676 + %.2681 =l add %.2475, 472 + storel $g_57, %.2681 + %.2682 =l add %.2475, 480 + storel $g_566, %.2682 + %.2683 =l add %.2475, 488 + storel $g_566, %.2683 + %.2684 
=l add %.2475, 496 + %.2685 =l copy $g_185 + %.2686 =l mul 0, 1 + %.2687 =l add %.2685, %.2686 + %.2688 =l copy %.2687 + storel %.2688, %.2684 + %.2689 =l add %.2475, 504 + %.2690 =l copy $g_518 + %.2691 =l mul 0, 1 + %.2692 =l add %.2690, %.2691 + %.2693 =l copy %.2692 + storel %.2693, %.2689 + %.2694 =l add %.2475, 512 + %.2695 =l extsw 0 + %.2696 =l copy %.2695 + storel %.2696, %.2694 + %.2697 =l add %.2475, 520 + storel $g_566, %.2697 + %.2698 =l add %.2475, 528 + storel $g_566, %.2698 + %.2699 =l add %.2475, 536 + %.2700 =l extsw 0 + %.2701 =l copy %.2700 + storel %.2701, %.2699 + %.2702 =l add %.2475, 544 + %.2703 =l copy $g_185 + %.2704 =l mul 0, 1 + %.2705 =l add %.2703, %.2704 + %.2706 =l copy %.2705 + storel %.2706, %.2702 + %.2707 =l add %.2475, 552 + storel $g_57, %.2707 + %.2708 =l add %.2475, 560 + %.2709 =l copy $g_518 + %.2710 =l mul 0, 1 + %.2711 =l add %.2709, %.2710 + %.2712 =l copy %.2711 + storel %.2712, %.2708 + %.2713 =l add %.2475, 568 + %.2714 =l copy $g_518 + %.2715 =l mul 0, 1 + %.2716 =l add %.2714, %.2715 + %.2717 =l copy %.2716 + storel %.2717, %.2713 + %.2718 =l add %.2475, 576 + storel $g_57, %.2718 + %.2719 =l add %.2475, 584 + storel $g_566, %.2719 + %.2720 =l add %.2475, 592 + %.2721 =l copy $g_185 + %.2722 =l mul 0, 1 + %.2723 =l add %.2721, %.2722 + %.2724 =l copy %.2723 + storel %.2724, %.2720 + %.2725 =l add %.2475, 600 + %.2726 =l extsw 0 + %.2727 =l copy %.2726 + storel %.2727, %.2725 + %.2728 =l add %.2475, 608 + storel $g_566, %.2728 + %.2729 =l add %.2475, 616 + storel $g_566, %.2729 + %.2730 =l add %.2475, 624 + %.2731 =l extsw 0 + %.2732 =l copy %.2731 + storel %.2732, %.2730 + %.2733 =l add %.2475, 632 + storel $g_566, %.2733 + %.2734 =l add %.2475, 640 + %.2735 =l copy $g_185 + %.2736 =l mul 0, 1 + %.2737 =l add %.2735, %.2736 + %.2738 =l copy %.2737 + storel %.2738, %.2734 + %.2739 =l add %.2475, 648 + storel $g_566, %.2739 + %.2740 =l add %.2475, 656 + %.2741 =l extsw 0 + %.2742 =l copy %.2741 + storel %.2742, 
%.2740 + %.2743 =l add %.2475, 664 + storel $g_57, %.2743 + %.2744 =l add %.2475, 672 + %.2745 =l copy $g_265 + %.2746 =l mul 0, 1 + %.2747 =l add %.2745, %.2746 + %.2748 =l copy %.2747 + storel %.2748, %.2744 + %.2749 =l add %.2475, 680 + %.2750 =l copy $g_265 + %.2751 =l mul 0, 1 + %.2752 =l add %.2750, %.2751 + %.2753 =l copy %.2752 + storel %.2753, %.2749 + %.2754 =l add %.2475, 688 + %.2755 =l copy $g_265 + %.2756 =l mul 0, 1 + %.2757 =l add %.2755, %.2756 + %.2758 =l copy %.2757 + storel %.2758, %.2754 + %.2759 =l add %.2475, 696 + storel $g_57, %.2759 + %.2760 =l add %.2475, 704 + %.2761 =l copy $g_185 + %.2762 =l mul 0, 1 + %.2763 =l add %.2761, %.2762 + %.2764 =l copy %.2763 + storel %.2764, %.2760 + %.2765 =l add %.2475, 712 + %.2766 =l copy $g_265 + %.2767 =l mul 0, 1 + %.2768 =l add %.2766, %.2767 + %.2769 =l copy %.2768 + storel %.2769, %.2765 + %.2770 =l add %.2475, 720 + storel $g_566, %.2770 + %.2771 =l add %.2475, 728 + storel $g_46, %.2771 + %.2772 =l add %.2475, 736 + storel $g_57, %.2772 + %.2773 =l add %.2475, 744 + %.2774 =l copy $g_1183 + %.2775 =l mul 0, 1 + %.2776 =l add %.2774, %.2775 + %.2777 =l copy %.2776 + storel %.2777, %.2773 + %.2778 =l add %.2475, 752 + %.2779 =l copy $g_185 + %.2780 =l mul 0, 1 + %.2781 =l add %.2779, %.2780 + %.2782 =l copy %.2781 + storel %.2782, %.2778 + %.2783 =l add %.2475, 760 + storel $g_566, %.2783 + %.2784 =l add %.2475, 768 + %.2785 =l copy $g_265 + %.2786 =l mul 0, 1 + %.2787 =l add %.2785, %.2786 + %.2788 =l copy %.2787 + storel %.2788, %.2784 + %.2789 =l add %.2475, 776 + %.2790 =l copy $g_185 + %.2791 =l mul 0, 1 + %.2792 =l add %.2790, %.2791 + %.2793 =l copy %.2792 + storel %.2793, %.2789 + %.2794 =l add %.2475, 784 + storel $g_566, %.2794 + %.2795 =l add %.2475, 792 + %.2796 =l copy $g_265 + %.2797 =l mul 0, 1 + %.2798 =l add %.2796, %.2797 + %.2799 =l copy %.2798 + storel %.2799, %.2795 + %.2800 =l add %.2475, 800 + storel $g_46, %.2800 + %.2801 =l add %.2475, 808 + %.2802 =l copy $g_1183 + 
%.2803 =l mul 0, 1 + %.2804 =l add %.2802, %.2803 + %.2805 =l copy %.2804 + storel %.2805, %.2801 + %.2806 =l add %.2475, 816 + %.2807 =l copy $g_518 + %.2808 =l mul 0, 1 + %.2809 =l add %.2807, %.2808 + %.2810 =l copy %.2809 + storel %.2810, %.2806 + %.2811 =l add %.2475, 824 + storel $g_566, %.2811 + %.2812 =l add %.2475, 832 + %.2813 =l copy $g_265 + %.2814 =l mul 0, 1 + %.2815 =l add %.2813, %.2814 + %.2816 =l copy %.2815 + storel %.2816, %.2812 + %.2817 =l add %.2475, 840 + storel $g_57, %.2817 + %.2818 =l add %.2475, 848 + %.2819 =l extsw 0 + %.2820 =l copy %.2819 + storel %.2820, %.2818 + %.2821 =l add %.2475, 856 + storel $g_566, %.2821 + %.2822 =l add %.2475, 864 + %.2823 =l copy $g_1183 + %.2824 =l mul 0, 1 + %.2825 =l add %.2823, %.2824 + %.2826 =l copy %.2825 + storel %.2826, %.2822 + %.2827 =l add %.2475, 872 + %.2828 =l copy $g_265 + %.2829 =l mul 0, 1 + %.2830 =l add %.2828, %.2829 + %.2831 =l copy %.2830 + storel %.2831, %.2827 + %.2832 =l add %.2475, 880 + %.2833 =l copy $g_1183 + %.2834 =l mul 0, 1 + %.2835 =l add %.2833, %.2834 + %.2836 =l copy %.2835 + storel %.2836, %.2832 + %.2837 =l add %.2475, 888 + %.2838 =l extsw 0 + %.2839 =l copy %.2838 + storel %.2839, %.2837 + %.2840 =l add %.2475, 896 + storel $g_57, %.2840 + %.2841 =l add %.2475, 904 + %.2842 =l extsw 0 + %.2843 =l copy %.2842 + storel %.2843, %.2841 + %.2844 =l add %.2475, 912 + %.2845 =l copy $g_518 + %.2846 =l mul 0, 1 + %.2847 =l add %.2845, %.2846 + %.2848 =l copy %.2847 + storel %.2848, %.2844 + %.2849 =l add %.2475, 920 + storel $g_57, %.2849 + %.2850 =l add %.2475, 928 + %.2851 =l extsw 0 + %.2852 =l copy %.2851 + storel %.2852, %.2850 + %.2853 =l add %.2475, 936 + storel $g_566, %.2853 + %.2854 =l add %.2475, 944 + %.2855 =l copy $g_265 + %.2856 =l mul 0, 1 + %.2857 =l add %.2855, %.2856 + %.2858 =l copy %.2857 + storel %.2858, %.2854 + %.2859 =l add %.2475, 952 + %.2860 =l copy $g_185 + %.2861 =l mul 0, 1 + %.2862 =l add %.2860, %.2861 + %.2863 =l copy %.2862 + storel 
%.2863, %.2859 + %.2864 =l add %.2475, 960 + storel $g_566, %.2864 + %.2865 =l add %.2475, 968 + %.2866 =l extsw 0 + %.2867 =l copy %.2866 + storel %.2867, %.2865 + %.2868 =l add %.2475, 976 + %.2869 =l extsw 0 + %.2870 =l copy %.2869 + storel %.2870, %.2868 + %.2871 =l add %.2475, 984 + storel $g_57, %.2871 + %.2872 =l add %.2475, 992 + storel $g_566, %.2872 + %.2873 =l add %.2475, 1000 + %.2874 =l extsw 0 + %.2875 =l copy %.2874 + storel %.2875, %.2873 + %.2876 =l add %.2475, 1008 + %.2877 =l copy $g_185 + %.2878 =l mul 0, 1 + %.2879 =l add %.2877, %.2878 + %.2880 =l copy %.2879 + storel %.2880, %.2876 + %.2881 =l add %.2475, 1016 + storel $g_46, %.2881 + %.2882 =l add %.2475, 1024 + %.2883 =l extsw 0 + %.2884 =l copy %.2883 + storel %.2884, %.2882 + %.2885 =l add %.2475, 1032 + %.2886 =l extsw 0 + %.2887 =l copy %.2886 + storel %.2887, %.2885 + %.2888 =l add %.2475, 1040 + %.2889 =l copy $g_185 + %.2890 =l mul 0, 1 + %.2891 =l add %.2889, %.2890 + %.2892 =l copy %.2891 + storel %.2892, %.2888 + %.2893 =l add %.2475, 1048 + %.2894 =l copy $g_265 + %.2895 =l mul 0, 1 + %.2896 =l add %.2894, %.2895 + %.2897 =l copy %.2896 + storel %.2897, %.2893 + %.2898 =l add %.2475, 1056 + %.2899 =l extsw 0 + %.2900 =l copy %.2899 + storel %.2900, %.2898 + %.2901 =l add %.2475, 1064 + %.2902 =l copy $g_185 + %.2903 =l mul 0, 1 + %.2904 =l add %.2902, %.2903 + %.2905 =l copy %.2904 + storel %.2905, %.2901 + %.2906 =l add %.2475, 1072 + storel $g_57, %.2906 + %.2907 =l add %.2475, 1080 + %.2908 =l extsw 0 + %.2909 =l copy %.2908 + storel %.2909, %.2907 + %.2910 =l add %.2475, 1088 + storel $g_46, %.2910 + %.2911 =l add %.2475, 1096 + storel $g_566, %.2911 + %.2912 =l add %.2475, 1104 + storel $g_57, %.2912 + %.2913 =l add %.2475, 1112 + %.2914 =l copy $g_185 + %.2915 =l mul 0, 1 + %.2916 =l add %.2914, %.2915 + %.2917 =l copy %.2916 + storel %.2917, %.2913 + %.2918 =l add %.2475, 1120 + %.2919 =l copy $g_518 + %.2920 =l mul 0, 1 + %.2921 =l add %.2919, %.2920 + %.2922 =l copy 
%.2921 + storel %.2922, %.2918 + %.2923 =l add %.2475, 1128 + %.2924 =l copy $g_185 + %.2925 =l mul 0, 1 + %.2926 =l add %.2924, %.2925 + %.2927 =l copy %.2926 + storel %.2927, %.2923 + %.2928 =l add %.2475, 1136 + %.2929 =l copy $g_265 + %.2930 =l mul 0, 1 + %.2931 =l add %.2929, %.2930 + %.2932 =l copy %.2931 + storel %.2932, %.2928 + %.2933 =l add %.2475, 1144 + storel $g_46, %.2933 + %.2934 =l add %.2475, 1152 + %.2935 =l copy $g_1183 + %.2936 =l mul 0, 1 + %.2937 =l add %.2935, %.2936 + %.2938 =l copy %.2937 + storel %.2938, %.2934 + %.2939 =l add %.2475, 1160 + %.2940 =l extsw 0 + %.2941 =l copy %.2940 + storel %.2941, %.2939 + %.2942 =l add %.2475, 1168 + %.2943 =l copy $g_1183 + %.2944 =l mul 0, 1 + %.2945 =l add %.2943, %.2944 + %.2946 =l copy %.2945 + storel %.2946, %.2942 + %.2947 =l add %.2475, 1176 + storel $g_46, %.2947 + %.2948 =l add %.2475, 1184 + storel $g_566, %.2948 + %.2949 =l add %.2475, 1192 + storel $g_566, %.2949 + %.2950 =l add %.2475, 1200 + storel $g_566, %.2950 + %.2951 =l add %.2475, 1208 + storel $g_566, %.2951 + %.2952 =l add %.2475, 1216 + storel $g_57, %.2952 + %.2953 =l add %.2475, 1224 + %.2954 =l copy $g_1183 + %.2955 =l mul 0, 1 + %.2956 =l add %.2954, %.2955 + %.2957 =l copy %.2956 + storel %.2957, %.2953 + %.2958 =l add %.2475, 1232 + storel $g_566, %.2958 + %.2959 =l add %.2475, 1240 + %.2960 =l copy $g_518 + %.2961 =l mul 0, 1 + %.2962 =l add %.2960, %.2961 + %.2963 =l copy %.2962 + storel %.2963, %.2959 + %.2964 =l add %.2475, 1248 + storel $g_566, %.2964 + %.2965 =l add %.2475, 1256 + %.2966 =l copy $g_518 + %.2967 =l mul 0, 1 + %.2968 =l add %.2966, %.2967 + %.2969 =l copy %.2968 + storel %.2969, %.2965 + %.2970 =l add %.2475, 1264 + %.2971 =l extsw 0 + %.2972 =l copy %.2971 + storel %.2972, %.2970 + %.2973 =l add %.2475, 1272 + %.2974 =l copy $g_1183 + %.2975 =l mul 0, 1 + %.2976 =l add %.2974, %.2975 + %.2977 =l copy %.2976 + storel %.2977, %.2973 + %.2978 =l add %.2475, 1280 + %.2979 =l extsw 0 + %.2980 =l copy %.2979 
+ storel %.2980, %.2978 + %.2981 =l add %.2475, 1288 + storel $g_57, %.2981 + %.2982 =l add %.2475, 1296 + storel $g_566, %.2982 + %.2983 =l add %.2475, 1304 + %.2984 =l extsw 0 + %.2985 =l copy %.2984 + storel %.2985, %.2983 + %.2986 =l add %.2475, 1312 + %.2987 =l extsw 0 + %.2988 =l copy %.2987 + storel %.2988, %.2986 + %.2989 =l add %.2475, 1320 + storel $g_46, %.2989 + %.2990 =l add %.2475, 1328 + storel $g_566, %.2990 + %.2991 =l add %.2475, 1336 + storel $g_566, %.2991 + %.2992 =l add %.2475, 1344 + %.2993 =l copy $g_1183 + %.2994 =l mul 0, 1 + %.2995 =l add %.2993, %.2994 + %.2996 =l copy %.2995 + storel %.2996, %.2992 + %.2997 =l add %.2475, 1352 + %.2998 =l copy $g_265 + %.2999 =l mul 0, 1 + %.3000 =l add %.2998, %.2999 + %.3001 =l copy %.3000 + storel %.3001, %.2997 + %.3002 =l add %.2475, 1360 + %.3003 =l extsw 0 + %.3004 =l copy %.3003 + storel %.3004, %.3002 + %.3005 =l add %.2475, 1368 + %.3006 =l copy $g_185 + %.3007 =l mul 0, 1 + %.3008 =l add %.3006, %.3007 + %.3009 =l copy %.3008 + storel %.3009, %.3005 + %.3010 =l add %.2475, 1376 + %.3011 =l copy $g_518 + %.3012 =l mul 0, 1 + %.3013 =l add %.3011, %.3012 + %.3014 =l copy %.3013 + storel %.3014, %.3010 + %.3015 =l add %.2475, 1384 + %.3016 =l extsw 0 + %.3017 =l copy %.3016 + storel %.3017, %.3015 + %.3018 =l add %.2475, 1392 + storel $g_57, %.3018 + %.3019 =l add %.2475, 1400 + %.3020 =l copy $g_1183 + %.3021 =l mul 0, 1 + %.3022 =l add %.3020, %.3021 + %.3023 =l copy %.3022 + storel %.3023, %.3019 + %.3024 =l add %.2475, 1408 + %.3025 =l copy $g_265 + %.3026 =l mul 0, 1 + %.3027 =l add %.3025, %.3026 + %.3028 =l copy %.3027 + storel %.3028, %.3024 + %.3029 =l add %.2475, 1416 + %.3030 =l extsw 0 + %.3031 =l copy %.3030 + storel %.3031, %.3029 + %.3032 =l add %.2475, 1424 + storel $g_566, %.3032 + %.3033 =l add %.2475, 1432 + %.3034 =l extsw 0 + %.3035 =l copy %.3034 + storel %.3035, %.3033 + %.3036 =l add %.2475, 1440 + %.3037 =l extsw 0 + %.3038 =l copy %.3037 + storel %.3038, %.3036 + %.3039 
=l add %.2475, 1448 + storel $g_57, %.3039 + %.3040 =l add %.2475, 1456 + %.3041 =l copy $g_185 + %.3042 =l mul 0, 1 + %.3043 =l add %.3041, %.3042 + %.3044 =l copy %.3043 + storel %.3044, %.3040 + %.3045 =l add %.2475, 1464 + %.3046 =l extsw 0 + %.3047 =l copy %.3046 + storel %.3047, %.3045 + %.3048 =l add %.2475, 1472 + %.3049 =l copy $g_185 + %.3050 =l mul 0, 1 + %.3051 =l add %.3049, %.3050 + %.3052 =l copy %.3051 + storel %.3052, %.3048 + %.3053 =l add %.2475, 1480 + %.3054 =l copy $g_1183 + %.3055 =l mul 0, 1 + %.3056 =l add %.3054, %.3055 + %.3057 =l copy %.3056 + storel %.3057, %.3053 + %.3058 =l add %.2475, 1488 + %.3059 =l copy $g_185 + %.3060 =l mul 0, 1 + %.3061 =l add %.3059, %.3060 + %.3062 =l copy %.3061 + storel %.3062, %.3058 + %.3063 =l add %.2475, 1496 + %.3064 =l copy $g_265 + %.3065 =l mul 0, 1 + %.3066 =l add %.3064, %.3065 + %.3067 =l copy %.3066 + storel %.3067, %.3063 + %.3068 =l add %.2475, 1504 + %.3069 =l extsw 0 + %.3070 =l copy %.3069 + storel %.3070, %.3068 + %.3071 =l add %.2475, 1512 + storel $g_57, %.3071 + %.3072 =l add %.2475, 1520 + %.3073 =l copy $g_265 + %.3074 =l mul 0, 1 + %.3075 =l add %.3073, %.3074 + %.3076 =l copy %.3075 + storel %.3076, %.3072 + %.3077 =l add %.2475, 1528 + storel $g_566, %.3077 + %.3078 =l add %.2475, 1536 + storel $g_566, %.3078 + %.3079 =l add %.2475, 1544 + %.3080 =l copy $g_265 + %.3081 =l mul 0, 1 + %.3082 =l add %.3080, %.3081 + %.3083 =l copy %.3082 + storel %.3083, %.3079 + %.3084 =l add %.2475, 1552 + storel $g_46, %.3084 + %.3085 =l add %.2475, 1560 + storel $g_566, %.3085 + %.3086 =l add %.2475, 1568 + %.3087 =l extsw 0 + %.3088 =l copy %.3087 + storel %.3088, %.3086 + %.3089 =l add %.2475, 1576 + storel $g_566, %.3089 + %.3090 =l add %.2475, 1584 + %.3091 =l copy $g_518 + %.3092 =l mul 0, 1 + %.3093 =l add %.3091, %.3092 + %.3094 =l copy %.3093 + storel %.3094, %.3090 + %.3095 =l add %.2475, 1592 + storel $g_566, %.3095 + %.3096 =l add %.2475, 1600 + storel $g_566, %.3096 + %.3097 =l add 
%.2475, 1608 + %.3098 =l extsw 0 + %.3099 =l copy %.3098 + storel %.3099, %.3097 + %.3100 =l add %.2475, 1616 + storel $g_46, %.3100 + %.3101 =l add %.2475, 1624 + storel $g_46, %.3101 + %.3102 =l add %.2475, 1632 + %.3103 =l copy $g_1183 + %.3104 =l mul 0, 1 + %.3105 =l add %.3103, %.3104 + %.3106 =l copy %.3105 + storel %.3106, %.3102 + %.3107 =l add %.2475, 1640 + %.3108 =l copy $g_1183 + %.3109 =l mul 0, 1 + %.3110 =l add %.3108, %.3109 + %.3111 =l copy %.3110 + storel %.3111, %.3107 + %.3112 =l add %.2475, 1648 + storel $g_566, %.3112 + %.3113 =l add %.2475, 1656 + storel $g_57, %.3113 + %.3114 =l add %.2475, 1664 + storel $g_46, %.3114 + %.3115 =l add %.2475, 1672 + %.3116 =l extsw 0 + %.3117 =l copy %.3116 + storel %.3117, %.3115 + %.3118 =l add %.2475, 1680 + %.3119 =l copy $g_518 + %.3120 =l mul 0, 1 + %.3121 =l add %.3119, %.3120 + %.3122 =l copy %.3121 + storel %.3122, %.3118 + %.3123 =l add %.2475, 1688 + storel $g_566, %.3123 + %.3124 =l add %.2475, 1696 + %.3125 =l copy $g_1183 + %.3126 =l mul 0, 1 + %.3127 =l add %.3125, %.3126 + %.3128 =l copy %.3127 + storel %.3128, %.3124 + %.3129 =l add %.2475, 1704 + %.3130 =l copy $g_265 + %.3131 =l mul 0, 1 + %.3132 =l add %.3130, %.3131 + %.3133 =l copy %.3132 + storel %.3133, %.3129 + %.3134 =l add %.2475, 1712 + %.3135 =l extsw 0 + %.3136 =l copy %.3135 + storel %.3136, %.3134 + %.3137 =l add %.2475, 1720 + %.3138 =l copy $g_185 + %.3139 =l mul 0, 1 + %.3140 =l add %.3138, %.3139 + %.3141 =l copy %.3140 + storel %.3141, %.3137 + %.3143 =l add %.3142, 0 + %.3144 =w copy 2383211199 + storew %.3144, %.3143 + %.3146 =l add %.3145, 0 + %.3147 =w copy 254 + storeb %.3147, %.3146 + %.3148 =l add %.3145, 1 + %.3149 =w copy 250 + storeb %.3149, %.3148 + %.3150 =l add %.3145, 2 + %.3151 =w copy 255 + storeb %.3151, %.3150 + %.3152 =l add %.3145, 3 + %.3153 =w copy 250 + storeb %.3153, %.3152 + %.3154 =l add %.3145, 4 + %.3155 =w copy 184 + storeb %.3155, %.3154 + %.3156 =l add %.3145, 5 + %.3157 =w copy 121 + storeb 
%.3157, %.3156 + %.3158 =l add %.3145, 6 + %.3159 =w copy 0 + storeb %.3159, %.3158 + %.3160 =l add %.3145, 7 + %.3161 =w copy 189 + storeb %.3161, %.3160 + %.3162 =l add %.3145, 8 + %.3163 =w copy 121 + storeb %.3163, %.3162 + %.3164 =l add %.3145, 9 + %.3165 =w copy 254 + storeb %.3165, %.3164 + %.3166 =l add %.3145, 10 + %.3167 =w copy 255 + storeb %.3167, %.3166 + %.3168 =l add %.3145, 11 + %.3169 =w copy 184 + storeb %.3169, %.3168 + %.3170 =l add %.3145, 12 + %.3171 =w copy 6 + storeb %.3171, %.3170 + %.3172 =l add %.3145, 13 + %.3173 =w copy 255 + storeb %.3173, %.3172 + %.3174 =l add %.3145, 14 + %.3175 =w copy 255 + storeb %.3175, %.3174 + %.3176 =l add %.3145, 15 + %.3177 =w copy 6 + storeb %.3177, %.3176 + %.3178 =l add %.3145, 16 + %.3179 =w copy 6 + storeb %.3179, %.3178 + %.3180 =l add %.3145, 17 + %.3181 =w copy 189 + storeb %.3181, %.3180 + %.3182 =l add %.3145, 18 + %.3183 =w copy 255 + storeb %.3183, %.3182 + %.3184 =l add %.3145, 19 + %.3185 =w copy 224 + storeb %.3185, %.3184 + %.3186 =l add %.3145, 20 + %.3187 =w copy 121 + storeb %.3187, %.3186 + %.3188 =l add %.3145, 21 + %.3189 =w copy 6 + storeb %.3189, %.3188 + %.3190 =l add %.3145, 22 + %.3191 =w copy 0 + storeb %.3191, %.3190 + %.3192 =l add %.3145, 23 + %.3193 =w copy 250 + storeb %.3193, %.3192 + %.3195 =l add %.3194, 0 + %.3196 =w copy 3785821799 + storew %.3196, %.3195 + %.3198 =l add %.3197, 0 + %.3199 =w copy 1382872816 + storew %.3199, %.3198 + %.3203 =w copy 65535 + %.3204 =w call $safe_rshift_func_uint16_t_u_s(w %.3203, w 10) + %.3205 =w copy %.3204 + %.3206 =l loadl $g_1070 + %.3207 =l loadl %.3206 + %.3208 =l loadl $g_1069 + %.3209 =l loadl %.3208 + %.3210 =l loadl %.3209 + %.3211 =w ceql %.3207, %.3210 + %.3212 =w loadsb $g_629 + %.3213 =w extsb %.3212 + %.3214 =w copy 255 + %.3215 =l copy $g_1183 + %.3216 =l mul 16, 1 + %.3217 =l add %.3215, %.3216 + %.3218 =l copy %.3217 + %.3219 =w loadsw %.3218 + %.3220 =l extsw 6 + %.3221 =l mul %.3220, 14 + %.3222 =l add %.2219, %.3221 
+ %.3223 =l extsw 1 + %.3224 =l mul %.3223, 2 + %.3225 =l add %.3222, %.3224 + %.3226 =w loaduh %.3225 + %.3227 =w sub %.3226, 1 + storeh %.3227, %.3225 + %.3228 =w copy %.3227 + %.3229 =l copy $g_794 + %.3230 =l mul 0, 1 + %.3231 =l add %.3229, %.3230 + %.3232 =l copy %.3231 + %.3233 =w loadsw %.3232 + %.3234 =w copy %.3233 + %.3235 =w call $safe_rshift_func_int8_t_s_s(w %.3234, w 0) + %.3236 =w loadsw %.3142 + %.3237 =w loadsw %.2360 + %.3238 =l extsw %.3237 + %.3239 =w csgtl %.3238, 71 + %.3240 =l extsw %.3239 + %.3241 =l loadl %.167 + %.3242 =w loadsw %.3241 + %.3243 =l extsw %.3242 + %.3244 =l call $safe_sub_func_int64_t_s_s(l %.3240, l %.3243) + %.3245 =w copy %.3244 + %.3246 =l loadl %.1 + %.3247 =w loadsw %.3246 + %.3248 =w copy %.3247 + %.3249 =w call $safe_div_func_uint8_t_u_u(w %.3245, w %.3248) + %.3250 =w extub %.3249 + %.3251 =w or %.3236, %.3250 + %.3252 =l loadl %.167 + %.3253 =w loadsw %.3252 + %.3254 =w copy %.3253 + %.3255 =w call $safe_mul_func_uint8_t_u_u(w %.3228, w %.3254) + %.3256 =w extub %.3255 + %.3257 =w loadsw %.2360 + %.3258 =w ceqw %.3256, %.3257 + %.3259 =l loadl %.13 + %.3260 =w cnel %.3259, $g_88 + %.3261 =w copy %.3260 + %.3262 =l copy $g_1183 + %.3263 =l mul 16, 1 + %.3264 =l add %.3262, %.3263 + %.3265 =l copy %.3264 + %.3266 =w loadsw %.3265 + %.3267 =w copy %.3266 + %.3268 =w call $safe_mul_func_int8_t_s_s(w %.3261, w %.3267) + %.3269 =l loadl %.167 + %.3270 =w loadsw %.3269 + %.3271 =w call $safe_rshift_func_int8_t_s_s(w %.3268, w %.3270) + %.3272 =w extsb %.3271 + %.3273 =w loadsw %.3142 + %.3274 =w copy %.3273 + %.3275 =w call $safe_add_func_uint16_t_u_u(w %.3272, w %.3274) + %.3276 =l loadl %.1 + %.3277 =w loadsw %.3276 + %.3278 =l loadl $g_1123 + %.3279 =l loadl %.167 + %.3280 =w loadsw %.3279 + %.3281 =l loadl %.167 + storew %.3280, %.3281 + %.3282 =l extsw %.3280 + %.3283 =w cnel %.3282, 183 + %.3284 =w loadsw %.3142 + %.3285 =w csgew %.3283, %.3284 + %.3286 =w copy %.3285 + %.3287 =w call $safe_mul_func_uint8_t_u_u(w 
%.3214, w %.3286) + %.3288 =w extub %.3287 + %.3289 =w cnew %.3288, 0 + jnz %.3289, @logic_join.739, @logic_right.738 +@logic_right.738 + %.3290 =w loadsw %.3142 + %.3291 =w cnew %.3290, 0 +@logic_join.739 + %.3292 =w phi @for_body.735 %.3289, @logic_right.738 %.3291 + %.3293 =l extsw 2 + %.3294 =l mul %.3293, 8 + %.3295 =l add %.3145, %.3294 + %.3296 =l extsw 1 + %.3297 =l mul %.3296, 4 + %.3298 =l add %.3295, %.3297 + %.3299 =l extsw 2 + %.3300 =l mul %.3299, 1 + %.3301 =l add %.3298, %.3300 + %.3302 =w loadub %.3301 + %.3303 =w extub %.3302 + %.3304 =w or %.3292, %.3303 + %.3305 =w copy %.3304 + %.3306 =l loadl %.2059 + %.3307 =w copy %.3306 + %.3308 =w call $safe_mul_func_int8_t_s_s(w %.3305, w %.3307) + %.3309 =l extsb %.3308 + %.3310 =w csgel %.3309, 15 + %.3311 =w ceqw %.3213, %.3310 + %.3312 =w and %.3211, %.3311 + %.3313 =w loadsw %.2360 + %.3314 =w cslew %.3312, %.3313 + %.3315 =l extsw 0 + %.3316 =l mul %.3315, 8 + %.3317 =l add %.3145, %.3316 + %.3318 =l extsw 1 + %.3319 =l mul %.3318, 4 + %.3320 =l add %.3317, %.3319 + %.3321 =l extsw 0 + %.3322 =l mul %.3321, 1 + %.3323 =l add %.3320, %.3322 + %.3324 =w loadub %.3323 + %.3325 =w extub %.3324 + %.3326 =w csgew %.3314, %.3325 + %.3327 =w copy %.3326 + %.3328 =w call $safe_mul_func_int8_t_s_s(w %.3205, w %.3327) + %.3329 =w extsb %.3328 + %.3330 =w cnew %.3329, 0 + jnz %.3330, @if_true.740, @if_false.741 +@if_true.740 + %.3332 =l add %.3331, 0 + %.3333 =w copy 618275278 + storew %.3333, %.3332 + %.3335 =l add %.3334, 0 + storel $g_858, %.3335 + %.3337 =l add %.3336, 0 + %.3338 =l extsw 0 + %.3339 =l sub %.3338, 1 + %.3340 =w copy %.3339 + storew %.3340, %.3337 + %.3342 =l add %.3341, 0 + %.3343 =w copy 2 + storew %.3343, %.3342 + %.3345 =l add %.3344, 0 + %.3346 =w copy 4149646672 + storew %.3346, %.3345 + %.3347 =l loadl $g_23 + %.3348 =w loadsw %.3347 + %.3349 =l loadl %.3334 + %.3350 =w loaduh %.3349 + %.3351 =w extuh %.3350 + %.3352 =w loaduw %.3331 + %.3353 =w or %.3351, %.3352 + %.3354 =w copy 
%.3353 + storeh %.3354, %.3349 + %.3355 =w loadsw %.3142 + %.3356 =l extsw %.3355 + %.3357 =l extsw 0 + %.3358 =l mul %.3357, 8 + %.3359 =l add $g_850, %.3358 + %.3360 =l loadl %.3359 + %.3361 =l loadl $g_1589 + %.3362 =w ceql %.3360, %.3361 + %.3363 =l extsw %.3362 + %.3364 =l call $safe_add_func_uint64_t_u_u(l %.3356, l %.3363) + %.3365 =l loadl $g_1604 + %.3366 =l add %.3365, 1 + storel %.3366, $g_1604 + %.3367 =l or %.3364, %.3365 + %.3368 =w copy %.3367 + %.3369 =l copy 11677653728370779156 + %.3370 =l call $safe_mod_func_int64_t_s_s(l 768946313878535519, l %.3369) + %.3371 =l loadl $g_1590 + %.3372 =w loaduh %.3371 + %.3373 =l extuh %.3372 + %.3374 =w csltl %.3370, %.3373 + %.3375 =w copy %.3374 + %.3376 =w call $safe_rshift_func_int8_t_s_u(w %.3368, w %.3375) + %.3377 =w extsb %.3376 + %.3378 =l extsw 0 + %.3379 =l mul %.3378, 48 + %.3380 =l add %.2371, %.3379 + %.3381 =l extsw 0 + %.3382 =l mul %.3381, 16 + %.3383 =l add %.3380, %.3382 + %.3384 =l extsw 1 + %.3385 =l mul %.3384, 4 + %.3386 =l add %.3383, %.3385 + %.3387 =w loadsw %.3386 + %.3388 =w copy 213 + %.3389 =l copy $g_518 + %.3390 =l mul 24, 1 + %.3391 =l add %.3389, %.3390 + %.3392 =l copy %.3391 + %.3393 =l loadl %.3392 + %.3394 =w copy %.3393 + %.3395 =w call $safe_mul_func_uint8_t_u_u(w %.3388, w %.3394) + %.3396 =w extub %.3395 + %.3397 =w and %.3387, %.3396 + %.3398 =w copy %.3397 + %.3399 =l loadl $g_1590 + %.3400 =w loaduh %.3399 + %.3401 =w extuh %.3400 + %.3402 =w call $safe_lshift_func_int16_t_s_u(w %.3398, w %.3401) + %.3403 =w copy 4 + %.3404 =l loadl $g_1313 + %.3405 =l loadl %.3404 + %.3406 =l loadl %.3405 + %.3407 =l loadl %.3406 + %.3408 =w loaduw %.3407 + %.3409 =w call $safe_div_func_uint32_t_u_u(w %.3403, w %.3408) + %.3410 =l extsw 0 + %.3411 =l extsw 0 + %.3412 =l mul %.3411, 40 + %.3413 =l add $g_1615, %.3412 + %.3414 =l extsw 4 + %.3415 =l mul %.3414, 8 + %.3416 =l add %.3413, %.3415 + %.3417 =l loadl %.3416 + %.3418 =w ceql %.3410, %.3417 + %.3419 =w copy %.3418 + %.3420 =w 
call $safe_mul_func_int16_t_s_s(w %.3377, w %.3419) + %.3421 =w extsh %.3420 + %.3422 =w or %.3348, %.3421 + storew %.3422, %.3347 + jmp @if_join.742 +@if_false.741 + %.3423 =w loaduh $g_1617 + %.3424 =l extuh %.3423 + ret %.3424 +@if_join.742 + %.3425 =l loadl $g_173 + %.3426 =w loadsw %.3425 + %.3427 =w cnew %.3426, 0 + jnz %.3427, @if_true.743, @if_false.744 +@if_true.743 + jmp @for_cont.736 +@if_false.744 + %.3428 =l copy $g_518 + %.3429 =l mul 40, 1 + %.3430 =l add %.3428, %.3429 + %.3431 =l copy %.3430 + storew 5, %.3431 +@for_cond.745 + %.3432 =l copy $g_518 + %.3433 =l mul 40, 1 + %.3434 =l add %.3432, %.3433 + %.3435 =l copy %.3434 + %.3436 =w loadsw %.3435 + %.3437 =w csgew %.3436, 1 + jnz %.3437, @for_body.746, @for_join.748 +@for_body.746 + %.3438 =l extsw 0 + %.3439 =l mul %.3438, 2 + %.3440 =l add %.50, %.3439 + %.3441 =w loaduh %.3440 + %.3442 =l extuh %.3441 + ret %.3442 +@for_cont.747 + %.3443 =l copy $g_518 + %.3444 =l mul 40, 1 + %.3445 =l add %.3443, %.3444 + %.3446 =l copy %.3445 + %.3447 =w loadsw %.3446 + %.3448 =w sub %.3447, 1 + storew %.3448, %.3446 + jmp @for_cond.745 +@for_join.748 +@for_cont.736 + %.3449 =w loaduw $g_84 + %.3450 =w copy 1 + %.3451 =w add %.3449, %.3450 + storew %.3451, $g_84 + jmp @for_cond.734 +@for_join.737 + jmp @if_join.749 +@if_false.733 + %.3453 =l add %.3452, 0 + %.3454 =w copy 64090 + storeh %.3454, %.3453 + %.3456 =l add %.3455, 0 + storel $g_1038, %.3456 + %.3458 =l add %.3457, 0 + storel $g_776, %.3458 + %.3459 =l add %.3457, 8 + %.3460 =l extsw 0 + %.3461 =l copy %.3460 + storel %.3461, %.3459 + %.3462 =l add %.3457, 16 + storel $g_776, %.3462 + %.3463 =l add %.3457, 24 + %.3464 =l extsw 0 + %.3465 =l copy %.3464 + storel %.3465, %.3463 + %.3466 =l add %.3457, 32 + storel $g_776, %.3466 + %.3467 =l add %.3457, 40 + %.3468 =l extsw 0 + %.3469 =l copy %.3468 + storel %.3469, %.3467 + %.3470 =l add %.3457, 48 + storel $g_776, %.3470 + %.3471 =l add %.3457, 56 + %.3472 =l extsw 0 + %.3473 =l copy %.3472 + storel 
%.3473, %.3471 + %.3476 =l add %.3475, 0 + %.3477 =l extsw 0 + %.3478 =l sub %.3477, 1 + %.3479 =w copy %.3478 + storew %.3479, %.3476 + %.3481 =l add %.3480, 0 + %.3482 =w copy 3767361468 + storew %.3482, %.3481 + storew 0, %.3483 +@for_cond.750 + %.3484 =w loadsw %.3483 + %.3485 =w csltw %.3484, 7 + jnz %.3485, @for_body.751, @for_join.753 +@for_body.751 + %.3486 =w copy 2380640979 + %.3487 =w loadsw %.3483 + %.3488 =l extsw %.3487 + %.3489 =l mul %.3488, 4 + %.3490 =l add %.3474, %.3489 + storew %.3486, %.3490 +@for_cont.752 + %.3491 =w loadsw %.3483 + %.3492 =w add %.3491, 1 + storew %.3492, %.3483 + jmp @for_cond.750 +@for_join.753 + %.3493 =l loadl $g_1123 + %.3494 =l loadl $g_1123 + %.3495 =l loaduw %.3493 + storew %.3495, %.3494 + %.3496 =l add %.3493, 4 + %.3497 =l add %.3494, 4 + %.3498 =l loaduw %.3496 + storew %.3498, %.3497 + %.3499 =l add %.3496, 4 + %.3500 =l add %.3497, 4 + %.3501 =l loaduw %.3499 + storew %.3501, %.3500 + %.3502 =l add %.3499, 4 + %.3503 =l add %.3500, 4 + %.3504 =l loaduw %.3502 + storew %.3504, %.3503 + %.3505 =l add %.3502, 4 + %.3506 =l add %.3503, 4 + %.3507 =l loaduw %.3505 + storew %.3507, %.3506 + %.3508 =l add %.3505, 4 + %.3509 =l add %.3506, 4 + %.3510 =l extsw 0 + storel %.3510, $g_1604 +@for_cond.754 + %.3511 =l loadl $g_1604 + %.3512 =l extsw 0 + %.3513 =w culel %.3511, %.3512 + jnz %.3513, @for_body.755, @for_join.757 +@for_body.755 + %.3515 =l add %.3514, 0 + %.3516 =l extsw 0 + %.3517 =l sub %.3516, 4 + %.3518 =l copy %.3517 + storel %.3518, %.3515 + %.3519 =l add %.3514, 8 + storel 7086594054811500327, %.3519 + %.3520 =l add %.3514, 16 + storel 6118719662111260546, %.3520 + %.3521 =l add %.3514, 24 + %.3522 =l copy 2 + storel %.3522, %.3521 + %.3523 =l add %.3514, 32 + %.3524 =l extsw 0 + %.3525 =l sub %.3524, 4 + %.3526 =l copy %.3525 + storel %.3526, %.3523 + %.3527 =l add %.3514, 40 + storel 6118719662111260546, %.3527 + %.3528 =l add %.3514, 48 + %.3529 =l extsw 0 + %.3530 =l sub %.3529, 4 + %.3531 =l copy 
%.3530 + storel %.3531, %.3528 + %.3532 =l add %.3514, 56 + storel 724151589213230642, %.3532 + %.3533 =l add %.3514, 64 + storel 7086594054811500327, %.3533 + %.3534 =l add %.3514, 72 + %.3535 =l copy 2 + storel %.3535, %.3534 + %.3536 =l add %.3514, 80 + storel 724151589213230642, %.3536 + %.3537 =l add %.3514, 88 + storel 724151589213230642, %.3537 + %.3538 =l add %.3514, 96 + storel 6118719662111260546, %.3538 + %.3539 =l add %.3514, 104 + storel 6118719662111260546, %.3539 + %.3540 =l add %.3514, 112 + %.3541 =l copy 7 + storel %.3541, %.3540 + %.3542 =l add %.3514, 120 + %.3543 =l copy 2 + storel %.3543, %.3542 + %.3545 =l add %.3544, 0 + %.3546 =w copy 253 + storeb %.3546, %.3545 + %.3548 =l add %.3547, 0 + %.3549 =w copy 1738457409 + storew %.3549, %.3548 + %.3551 =l add %.3550, 0 + storel %.2053, %.3551 + %.3553 =l add %.3552, 0 + %.3554 =w loadsb $g_2 + %.3555 =l extsb %.3554 + %.3556 =l mul %.3555, 4 + %.3557 =l add %.154, %.3556 + storel %.3557, %.3553 + %.3559 =l add %.3558, 0 + storel $g_1038, %.3559 + %.3561 =l add %.3560, 0 + %.3562 =l extsw 0 + %.3563 =l sub %.3562, 3 + %.3564 =w copy %.3563 + storeh %.3564, %.3561 + %.3566 =l add %.3565, 0 + %.3567 =w copy 65532 + storeh %.3567, %.3566 + %.3570 =w loadsw %.3547 + %.3571 =l extsw %.3570 + %.3572 =l loadl $g_1604 + %.3573 =l extsw 3 + %.3574 =l add %.3572, %.3573 + %.3575 =l copy %.3574 + %.3576 =l mul %.3575, 2 + %.3577 =l add %.50, %.3576 + %.3578 =w loaduh %.3577 + %.3579 =l extuh %.3578 + %.3580 =w cnel 51900480, 0 + jnz %.3580, @logic_join.759, @logic_right.758 +@logic_right.758 + %.3581 =w copy 4045989480 + %.3582 =l loadl %.167 + %.3583 =w loadsw %.3582 + %.3584 =l loadl $g_1589 + %.3585 =l loadl %.3584 + %.3586 =w loaduh %.3585 + %.3587 =w extuh %.3586 + %.3588 =w cnew %.3587, 0 + jnz %.3588, @logic_right.760, @logic_join.761 +@logic_right.760 + %.3589 =l extsw 0 + %.3590 =l sub %.3589, 1 + %.3591 =w copy %.3590 + %.3592 =w copy 13 + %.3593 =w call $safe_rshift_func_int16_t_s_u(w %.3591, w 
%.3592) + %.3594 =w extsh %.3593 + %.3595 =w loaduh %.3452 + %.3596 =w extuh %.3595 + %.3597 =w cnew %.3594, %.3596 + %.3598 =w cnew %.3597, 0 +@logic_join.761 + %.3599 =w phi @logic_right.758 %.3588, @logic_right.760 %.3598 + %.3600 =w or %.3583, %.3599 + storew %.3600, %.3582 + %.3601 =l loadl $g_23 + storew %.3600, %.3601 + %.3602 =w copy %.3600 + %.3603 =w call $safe_mod_func_uint32_t_u_u(w %.3581, w %.3602) + %.3604 =w copy %.3603 + %.3605 =l extsw 0 + %.3606 =l sub %.3605, 1 + %.3607 =w copy %.3606 + %.3608 =w call $safe_mod_func_int8_t_s_s(w %.3604, w %.3607) + %.3609 =w extsb %.3608 + %.3610 =l extsw 1 + %.3611 =l mul %.3610, 2 + %.3612 =l add %.51, %.3611 + %.3613 =w loadsh %.3612 + %.3614 =l extsh %.3613 + %.3615 =l xor %.3614, 153 + %.3616 =w copy %.3615 + storeh %.3616, %.3612 + %.3617 =w copy %.3616 + %.3618 =l copy $g_185 + %.3619 =l mul 44, 1 + %.3620 =l add %.3618, %.3619 + %.3621 =l copy %.3620 + %.3622 =w loadsw %.3621 + %.3623 =w call $safe_lshift_func_uint8_t_u_s(w %.3617, w %.3622) + %.3624 =w extub %.3623 + %.3625 =w and %.3609, %.3624 + %.3626 =l extsw %.3625 + %.3627 =l copy $g_794 + %.3628 =l mul 12, 1 + %.3629 =l add %.3627, %.3628 + %.3630 =l copy %.3629 + %.3631 =w loadsw %.3630 + %.3632 =l extsw %.3631 + %.3633 =l call $safe_add_func_int64_t_s_s(l %.3626, l %.3632) + %.3634 =w copy %.3633 + %.3635 =w call $safe_unary_minus_func_int32_t_s(w %.3634) + %.3636 =l extsw %.3635 + %.3637 =w csgtl %.3636, 1 + %.3638 =l extsw %.3637 + %.3639 =l extsw 2 + %.3640 =l mul %.3639, 32 + %.3641 =l add %.3514, %.3640 + %.3642 =l extsw 2 + %.3643 =l mul %.3642, 8 + %.3644 =l add %.3641, %.3643 + %.3645 =l loadl %.3644 + %.3646 =l copy %.3645 + %.3647 =l call $safe_div_func_uint64_t_u_u(l %.3638, l %.3646) + %.3648 =l extsw 0 + %.3649 =l sub %.3648, 1 + %.3650 =l copy %.3649 + %.3651 =w cnel %.3647, %.3650 + %.3652 =w cnew %.3651, 0 +@logic_join.759 + %.3653 =w phi @for_body.755 %.3580, @logic_join.761 %.3652 + %.3654 =w loadsb $g_631 + %.3655 =l extsb 
%.3654 + %.3656 =w csgel 19, %.3655 + %.3657 =l copy $g_1183 + %.3658 =l mul 8, 1 + %.3659 =l add %.3657, %.3658 + %.3660 =l copy %.3659 + %.3661 =l loadl %.3660 + %.3662 =l copy %.3661 + %.3663 =l copy 18446744073709551615 + %.3664 =l call $safe_div_func_uint64_t_u_u(l %.3662, l %.3663) + %.3665 =w cugtl %.3579, %.3664 + %.3666 =l loadl $g_1590 + %.3667 =w loaduh %.3666 + %.3668 =w extuh %.3667 + %.3669 =w or %.3665, %.3668 + %.3670 =l copy 1979550271 + %.3671 =w cultl 4294967295, %.3670 + %.3672 =w copy %.3671 + %.3673 =l extsw 1 + %.3674 =l mul %.3673, 80 + %.3675 =l add %.185, %.3674 + %.3676 =l extsw 8 + %.3677 =l mul %.3676, 8 + %.3678 =l add %.3675, %.3677 + %.3679 =l loadl %.3678 + %.3680 =w copy %.3679 + %.3681 =w call $safe_add_func_int16_t_s_s(w %.3672, w %.3680) + %.3682 =w loadub %.3544 + %.3683 =l extub %.3682 + %.3684 =w loadsh %.2062 + %.3685 =l extsh %.3684 + %.3686 =l call $safe_mod_func_int64_t_s_s(l %.3683, l %.3685) + %.3687 =l or %.3571, %.3686 + %.3688 =w copy %.3687 + storew %.3688, %.3547 + %.3689 =w loadsw $g_1645 + %.3690 =w cnew %.3689, 0 + jnz %.3690, @if_true.762, @if_false.763 +@if_true.762 + %.3692 =l add %.3691, 0 + %.3693 =w copy 5 + storeb %.3693, %.3692 + %.3695 =l add %.3694, 0 + %.3696 =w copy 10544 + storeh %.3696, %.3695 + %.3697 =l extsw 0 + %.3698 =l loadl %.3550 + %.3699 =w cnel %.3697, %.3698 + %.3700 =w loadsb %.3691 + %.3701 =w extsb %.3700 + %.3702 =l loadl $g_1589 + %.3703 =l loadl %.3702 + %.3704 =w loaduh %.3703 + %.3705 =l extuh %.3704 + %.3706 =l copy $g_794 + %.3707 =l mul 4, 1 + %.3708 =l add %.3706, %.3707 + %.3709 =l copy %.3708 + %.3710 =w loaduw %.3709 + %.3711 =l loadl $g_173 + %.3712 =w loadsw %.3711 + %.3713 =w loadsb %.3691 + %.3714 =w extsb %.3713 + %.3715 =w or %.3712, %.3714 + %.3716 =w copy %.3715 + %.3717 =l loadl %.167 + %.3718 =w loadsw %.3717 + %.3719 =l extsw %.3718 + %.3720 =l loadl $g_1037 + %.3721 =l loadl %.3720 + %.3722 =l loadl %.3721 + %.3723 =l loadl $g_1037 + %.3724 =l loadl %.3723 + 
storel %.3722, %.3724 + %.3725 =l loadl %.3552 + %.3726 =w ceql %.3722, %.3725 + %.3727 =w copy %.3726 + %.3728 =w loadsb %.3691 + %.3729 =w extsb %.3728 + %.3730 =w call $safe_mul_func_int16_t_s_s(w %.3727, w %.3729) + %.3731 =w extsh %.3730 + %.3732 =l loadl %.1 + %.3733 =w loadsw %.3732 + %.3734 =w and %.3731, %.3733 + %.3735 =l extsw %.3734 + %.3736 =w loadsh $g_81 + %.3737 =l extsh %.3736 + %.3738 =l call $safe_mod_func_int64_t_s_s(l %.3735, l %.3737) + %.3739 =w cslel %.3719, %.3738 + %.3740 =w copy %.3739 + %.3741 =w call $safe_rshift_func_int16_t_s_u(w %.3716, w %.3740) + %.3742 =w copy %.3741 + %.3743 =w copy 2 + %.3744 =w call $safe_rshift_func_int8_t_s_u(w %.3742, w %.3743) + %.3745 =l extsb %.3744 + %.3746 =w csgel 48, %.3745 + %.3747 =l extsw %.3746 + %.3748 =l copy 643467775842209626 + %.3749 =l call $safe_mod_func_uint64_t_u_u(l %.3747, l %.3748) + %.3750 =l xor %.3705, %.3749 + %.3751 =w copy %.3750 + storeh %.3751, %.3703 + %.3752 =w loadsh %.3694 + %.3753 =w copy %.3752 + %.3754 =w call $safe_mul_func_uint16_t_u_u(w %.3751, w %.3753) + %.3755 =w extuh %.3754 + %.3756 =w call $safe_mod_func_int32_t_s_s(w %.3701, w %.3755) + %.3757 =w copy %.3756 + %.3758 =w call $safe_lshift_func_int16_t_s_s(w %.3757, w 12) + %.3759 =w extsh %.3758 + %.3760 =w xor %.3699, %.3759 + %.3761 =l loadl $g_173 + storew %.3760, %.3761 + %.3762 =l loadl $g_173 + %.3763 =w loadsw %.3762 + %.3764 =w cnew %.3763, 0 + jnz %.3764, @if_true.764, @if_false.765 +@if_true.764 + jmp @for_cont.756 +@if_false.765 + %.3765 =w loadsw %.241 + %.3766 =l extsw %.3765 + ret %.3766 +@if_false.763 + %.3769 =l add %.3768, 0 + %.3770 =l extsw 0 + %.3771 =l copy %.3770 + storel %.3771, %.3769 + %.3773 =l add %.3772, 0 + storel $g_46, %.3773 + %.3774 =l add %.3772, 8 + storel $g_46, %.3774 + %.3775 =l add %.3772, 16 + storel $g_46, %.3775 + %.3776 =l add %.3772, 24 + storel $g_46, %.3776 + %.3777 =l add %.3772, 32 + storel $g_46, %.3777 + %.3779 =l add %.3778, 0 + %.3780 =l copy $g_185 + %.3781 =l 
mul 8, 1 + %.3782 =l add %.3780, %.3781 + %.3783 =l copy %.3782 + storel %.3783, %.3779 + storew 0, %.3784 +@for_cond.767 + %.3786 =w loadsw %.3784 + %.3787 =w csltw %.3786, 4 + jnz %.3787, @for_body.768, @for_join.770 +@for_body.768 + %.3788 =w copy 66482976 + %.3789 =w loadsw %.3784 + %.3790 =l extsw %.3789 + %.3791 =l mul %.3790, 4 + %.3792 =l add %.3767, %.3791 + storew %.3788, %.3792 +@for_cont.769 + %.3793 =w loadsw %.3784 + %.3794 =w add %.3793, 1 + storew %.3794, %.3784 + jmp @for_cond.767 +@for_join.770 + %.3795 =l extsw 0 + %.3796 =l loadl $g_1604 + %.3797 =l copy %.3796 + %.3798 =l mul %.3797, 40 + %.3799 =l add $g_1615, %.3798 + %.3800 =l loadl $g_1604 + %.3801 =l extsw 1 + %.3802 =l add %.3800, %.3801 + %.3803 =l copy %.3802 + %.3804 =l mul %.3803, 8 + %.3805 =l add %.3799, %.3804 + %.3806 =l loadl %.3805 + %.3807 =w ceql %.3795, %.3806 + %.3808 =l extsw %.3807 + %.3809 =w culel %.3808, 0 + %.3810 =w cnew %.3809, 0 + jnz %.3810, @logic_right.771, @logic_join.772 +@logic_right.771 + %.3811 =l extsw 0 + %.3812 =l mul %.3811, 4 + %.3813 =l add %.3767, %.3812 + %.3814 =w loadsw %.3813 + storew %.3814, %.3547 + %.3815 =l extsw 0 + %.3816 =l mul %.3815, 4 + %.3817 =l add %.3767, %.3816 + %.3818 =w loadsw %.3817 + %.3819 =w or %.3814, %.3818 + %.3820 =w copy %.3819 + %.3821 =l extsw 0 + %.3822 =l mul %.3821, 4 + %.3823 =l add %.3767, %.3822 + %.3824 =w loadsw %.3823 + %.3825 =l loadl %.3558 + %.3826 =l loadl $g_1313 + storel %.3825, %.3826 + %.3827 =w copy 1 + %.3828 =w copy 5 + %.3829 =w call $safe_lshift_func_int8_t_s_u(w %.3827, w %.3828) + %.3830 =l extsb %.3829 + %.3831 =l loadl %.3778 + storel %.3830, %.3831 + %.3832 =l loadl $g_173 + %.3833 =w loadsw %.3832 + %.3834 =w cnew %.3833, 0 + jnz %.3834, @logic_join.774, @logic_right.773 +@logic_right.773 + %.3835 =w loadsh %.3560 + %.3836 =w extsh %.3835 + %.3837 =w cnew %.3836, 0 +@logic_join.774 + %.3838 =w phi @logic_right.771 %.3834, @logic_right.773 %.3837 + %.3839 =w copy %.3838 + %.3840 =w copy 9 + 
%.3841 =w call $safe_lshift_func_int16_t_s_u(w %.3839, w %.3840) + %.3842 =w loadsw %.52 + %.3843 =w copy %.3842 + %.3844 =w call $safe_mod_func_int16_t_s_s(w %.3841, w %.3843) + %.3845 =l extsh %.3844 + %.3846 =l and %.3845, 54431 + %.3847 =w copy %.3846 + %.3848 =l extsw 6 + %.3849 =l mul %.3848, 4 + %.3850 =l add %.244, %.3849 + storew %.3847, %.3850 + %.3851 =l extuw %.3847 + %.3852 =w csgtl %.3830, %.3851 + %.3853 =w copy %.3852 + %.3854 =w copy 76 + %.3855 =w call $safe_add_func_uint8_t_u_u(w %.3853, w %.3854) + %.3856 =l extub %.3855 + %.3857 =w cslel %.3856, 44776 + %.3858 =w copy %.3857 + %.3859 =w call $safe_lshift_func_int8_t_s_s(w %.3858, w 4) + %.3860 =l extsb %.3859 + %.3861 =w loadub $g_566 + %.3862 =l extub %.3861 + %.3863 =l call $safe_div_func_int64_t_s_s(l %.3860, l %.3862) + %.3864 =l loadl %.3455 + %.3865 =w cnel %.3825, %.3864 + %.3866 =l extsw %.3865 + %.3867 =w csltl %.3866, 156 + %.3868 =w or %.3824, %.3867 + %.3869 =l loadl $g_1604 + %.3870 =l extsw 3 + %.3871 =l add %.3869, %.3870 + %.3872 =l copy %.3871 + %.3873 =l mul %.3872, 2 + %.3874 =l add %.50, %.3873 + %.3875 =w loaduh %.3874 + %.3876 =l extuh %.3875 + %.3877 =l and 57607, %.3876 + %.3878 =l and %.3877, 3 + %.3879 =w copy %.3878 + %.3880 =l extsw 2 + %.3881 =l mul %.3880, 32 + %.3882 =l add %.3514, %.3881 + %.3883 =l extsw 2 + %.3884 =l mul %.3883, 8 + %.3885 =l add %.3882, %.3884 + %.3886 =l loadl %.3885 + %.3887 =w copy %.3886 + %.3888 =w call $safe_lshift_func_uint8_t_u_u(w %.3879, w %.3887) + %.3889 =w extub %.3888 + %.3890 =w call $safe_add_func_uint16_t_u_u(w %.3820, w %.3889) + %.3891 =w extuh %.3890 + %.3892 =w cnew %.3891, 0 +@logic_join.772 + %.3893 =w phi @for_join.770 %.3810, @logic_join.774 %.3892 + %.3894 =l loadl $g_23 + storew %.3893, %.3894 + %.3895 =l extsw 2 + %.3896 =l mul %.3895, 4 + %.3897 =l add %.3767, %.3896 + %.3898 =w loadsw %.3897 + %.3899 =w cnew %.3898, 0 + jnz %.3899, @if_true.775, @if_false.776 +@if_true.775 + jmp @for_cont.756 +@if_false.776 
+@if_join.766 + %.3900 =l loadl $g_1123 + %.3901 =l loadl $g_1123 + %.3902 =l loaduw %.3900 + storew %.3902, %.3901 + %.3903 =l add %.3900, 4 + %.3904 =l add %.3901, 4 + %.3905 =l loaduw %.3903 + storew %.3905, %.3904 + %.3906 =l add %.3903, 4 + %.3907 =l add %.3904, 4 + %.3908 =l loaduw %.3906 + storew %.3908, %.3907 + %.3909 =l add %.3906, 4 + %.3910 =l add %.3907, 4 + %.3911 =l loaduw %.3909 + storew %.3911, %.3910 + %.3912 =l add %.3909, 4 + %.3913 =l add %.3910, 4 + %.3914 =l loaduw %.3912 + storew %.3914, %.3913 + %.3915 =l add %.3912, 4 + %.3916 =l add %.3913, 4 + %.3917 =w loadub %.3544 + %.3918 =w extub %.3917 + %.3919 =l loadl %.1 + storew %.3918, %.3919 + %.3920 =w cnew %.3918, 0 + jnz %.3920, @if_true.777, @if_false.778 +@if_true.777 + %.3922 =l add %.3921, 0 + storel $g_1123, %.3922 + %.3924 =l add %.3923, 0 + storel %.3921, %.3924 + %.3926 =l add %.3925, 0 + %.3927 =l copy $g_1183 + %.3928 =l mul 8, 1 + %.3929 =l add %.3927, %.3928 + %.3930 =l copy %.3929 + storel %.3930, %.3926 + %.3932 =l add %.3931, 0 + %.3933 =w copy 1741455405 + storew %.3933, %.3932 + %.3935 =l add %.3934, 0 + %.3936 =w copy 86 + storeb %.3936, %.3935 + %.3938 =l loadl $g_296 + %.3939 =l loadl %.3938 + %.3940 =w loadub %.3544 + %.3941 =w extub %.3940 + %.3942 =w cnew %.3941, 0 + jnz %.3942, @logic_join.780, @logic_right.779 +@logic_right.779 + %.3943 =l extsw 0 + %.3944 =l extsw 3 + %.3945 =l mul %.3944, 8 + %.3946 =l add %.3457, %.3945 + %.3947 =l loadl %.3946 + %.3948 =w cnel %.3943, %.3947 + %.3949 =l loadl $g_173 + %.3950 =w loadsw %.3949 + %.3951 =l extsw %.3950 + %.3952 =l loadl %.3923 + %.3953 =l loadl $g_1705 + storel %.3953, $g_1705 + %.3954 =w ceql %.3952, %.3953 + %.3955 =w copy %.3954 + %.3956 =w copy 27473 + %.3957 =l loadl $g_1604 + %.3958 =l copy %.3957 + %.3959 =l mul %.3958, 4 + %.3960 =l add %.154, %.3959 + %.3961 =w loaduw %.3960 + %.3962 =l copy 3 + %.3963 =l loadl %.3925 + storel %.3962, %.3963 + %.3964 =w cnel %.3962, 0 + jnz %.3964, @logic_join.786, 
@logic_right.785 +@logic_right.785 + %.3965 =l copy $g_130 + %.3966 =l mul 4, 1 + %.3967 =l add %.3965, %.3966 + %.3968 =l copy %.3967 + %.3969 =w loaduw %.3968 + %.3970 =w cnew %.3969, 0 + jnz %.3970, @logic_join.788, @logic_right.787 +@logic_right.787 + %.3971 =w loaduh %.3452 + %.3972 =w extuh %.3971 + %.3973 =w cnew %.3972, 0 +@logic_join.788 + %.3974 =w phi @logic_right.785 %.3970, @logic_right.787 %.3973 + %.3975 =l extsw %.3974 + %.3976 =l xor %.3975, 14260922971091615517 + %.3977 =w copy %.3976 + %.3978 =w loadsw %.3931 + %.3979 =w copy %.3978 + %.3980 =w call $safe_div_func_uint16_t_u_u(w %.3977, w %.3979) + %.3981 =l extsw 0 + %.3982 =l sub %.3981, 9 + %.3983 =w copy %.3982 + %.3984 =w loaduh %.3452 + %.3985 =w extuh %.3984 + %.3986 =w call $safe_lshift_func_int8_t_s_u(w %.3983, w %.3985) + %.3987 =l extsb %.3986 + %.3988 =w ceql %.3987, 253 + %.3989 =w copy %.3988 + %.3990 =w loaduh %.3452 + %.3991 =w copy %.3990 + %.3992 =w call $safe_div_func_uint8_t_u_u(w %.3989, w %.3991) + %.3993 =w extub %.3992 + %.3994 =l extsw 2 + %.3995 =l mul %.3994, 32 + %.3996 =l add %.3514, %.3995 + %.3997 =l extsw 2 + %.3998 =l mul %.3997, 8 + %.3999 =l add %.3996, %.3998 + %.4000 =l loadl %.3999 + %.4001 =w copy %.4000 + %.4002 =w call $safe_sub_func_uint32_t_u_u(w %.3993, w %.4001) + %.4003 =w cnew %.4002, 0 +@logic_join.786 + %.4004 =w phi @logic_right.779 %.3964, @logic_join.788 %.4003 + %.4005 =w copy %.4004 + %.4006 =w xor %.3961, %.4005 + storew %.4006, %.3960 + %.4007 =w cnew %.4006, 0 + jnz %.4007, @logic_join.784, @logic_right.783 +@logic_right.783 + %.4008 =w cnel 880984431, 0 +@logic_join.784 + %.4009 =w phi @logic_join.786 %.4007, @logic_right.783 %.4008 + %.4010 =l extsw 0 + %.4011 =w cnel %.4010, $g_1590 + %.4012 =w copy %.4011 + %.4013 =w call $safe_sub_func_uint16_t_u_u(w %.3956, w %.4012) + %.4014 =w copy %.4013 + %.4015 =w call $safe_mod_func_uint8_t_u_u(w %.3955, w %.4014) + %.4016 =w extub %.4015 + %.4017 =w loaduh %.3452 + %.4018 =w extuh %.4017 + 
%.4019 =w and %.4016, %.4018 + %.4020 =w loadsb %.3934 + %.4021 =w extsb %.4020 + %.4022 =w csgew %.4019, %.4021 + %.4023 =l extsw %.4022 + %.4024 =w csgtl %.4023, 63294 + %.4025 =l extsw %.4024 + %.4026 =l loadl %.55 + %.4027 =l xor %.4025, %.4026 + %.4028 =l xor %.3951, %.4027 + %.4029 =w copy %.4028 + storew %.4029, %.3949 + %.4030 =w loaduh %.3452 + %.4031 =w loadsw %.3547 + %.4032 =l loadl $g_23 + %.4033 =w loadsw %.4032 + %.4034 =w csltw %.4031, %.4033 + %.4035 =w copy %.4034 + %.4036 =w copy 4 + %.4037 =w call $safe_lshift_func_uint8_t_u_u(w %.4035, w %.4036) + %.4038 =w extub %.4037 + %.4039 =w loaduh %.3452 + %.4040 =w extuh %.4039 + %.4041 =w csgtw %.4038, %.4040 + %.4042 =w ceqw %.4041, 0 + %.4043 =l extsw %.4042 + %.4044 =w culel %.4043, 10993731942557843686 + %.4045 =w copy %.4044 + %.4046 =l loadl %.167 + %.4047 =w loadsw %.4046 + %.4048 =w copy %.4047 + %.4049 =w call $safe_mul_func_int8_t_s_s(w %.4045, w %.4048) + %.4050 =w extsb %.4049 + %.4051 =w call $safe_sub_func_int32_t_s_s(w %.3948, w %.4050) + %.4052 =w cnew %.4051, 0 + jnz %.4052, @logic_join.782, @logic_right.781 +@logic_right.781 + %.4053 =w loadsh %.3560 + %.4054 =w extsh %.4053 + %.4055 =w cnew %.4054, 0 +@logic_join.782 + %.4056 =w phi @logic_join.784 %.4052, @logic_right.781 %.4055 + %.4057 =w cnew %.4056, 0 +@logic_join.780 + %.4058 =w phi @if_true.777 %.3942, @logic_join.782 %.4057 + storel %.3547, %.1 + %.4059 =w loadsh %.58 + %.4060 =l extsh %.4059 + ret %.4060 +@if_false.778 + %.4063 =l add %.4062, 0 + %.4064 =l copy $g_185 + %.4065 =l mul 36, 1 + %.4066 =l add %.4064, %.4065 + %.4067 =l copy %.4066 + storel %.4067, %.4063 + %.4069 =l add %.4068, 0 + storel %.4062, %.4069 + %.4071 =l add %.4070, 0 + storel $g_566, %.4071 + storew 0, %.4072 +@for_cond.790 + %.4073 =w loadsw %.4072 + %.4074 =w csltw %.4073, 6 + jnz %.4074, @for_body.791, @for_join.793 +@for_body.791 + %.4075 =w copy 65396 + %.4076 =w loadsw %.4072 + %.4077 =l extsw %.4076 + %.4078 =l mul %.4077, 2 + %.4079 =l add 
%.4061, %.4078 + storeh %.4075, %.4079 +@for_cont.792 + %.4080 =w loadsw %.4072 + %.4081 =w add %.4080, 1 + storew %.4081, %.4072 + jmp @for_cond.790 +@for_join.793 + %.4082 =l extsw 1 + %.4083 =l mul %.4082, 2 + %.4084 =l add %.4061, %.4083 + %.4085 =w loaduh %.4084 + %.4086 =w sub %.4085, 1 + storeh %.4086, %.4084 + %.4087 =w loadub %.3544 + %.4088 =w extub %.4087 + %.4089 =w cnew %.4088, 0 + jnz %.4089, @logic_join.795, @logic_right.794 +@logic_right.794 + %.4090 =l loadl %.3552 + %.4091 =l loadl %.4068 + storel %.4090, %.4091 + %.4092 =w ceql %.4090, $g_1298 + %.4093 =l extsw %.4092 + %.4094 =w cslel %.4093, 59139 + %.4095 =w cnew %.4094, 0 + jnz %.4095, @logic_join.797, @logic_right.796 +@logic_right.796 + %.4096 =l copy $g_518 + %.4097 =l mul 0, 1 + %.4098 =l add %.4096, %.4097 + %.4099 =l copy %.4098 + %.4100 =w loadub %.4099 + %.4101 =w cnel 0, 0 + jnz %.4101, @logic_join.799, @logic_right.798 +@logic_right.798 + %.4102 =l extsw 0 + %.4103 =w cnel %.4102, $g_1706 + %.4104 =w loaduh %.3452 + %.4105 =w copy %.4104 + %.4106 =l loadl %.4070 + storeb %.4105, %.4106 + %.4107 =l loadl $g_80 + %.4108 =w copy %.4107 + %.4109 =w call $safe_div_func_uint8_t_u_u(w %.4105, w %.4108) + %.4110 =w loaduh %.3452 + %.4111 =w extuh %.4110 + %.4112 =w cnew %.4111, 0 + jnz %.4112, @logic_join.801, @logic_right.800 +@logic_right.800 + %.4113 =l loadl $g_1590 + %.4114 =w loaduh %.4113 + %.4115 =w extuh %.4114 + %.4116 =w cnew %.4115, 0 +@logic_join.801 + %.4117 =w phi @logic_right.798 %.4112, @logic_right.800 %.4116 + %.4118 =w cslew %.4103, %.4117 + %.4119 =w cnew %.4118, 0 +@logic_join.799 + %.4120 =w phi @logic_right.796 %.4101, @logic_join.801 %.4119 + %.4121 =l extsw %.4120 + %.4122 =l extsw 0 + %.4123 =l sub %.4122, 1 + %.4124 =w cslel %.4121, %.4123 + %.4125 =w cnel 0, 0 +@logic_join.797 + %.4126 =w phi @logic_right.794 %.4095, @logic_join.799 %.4125 + %.4127 =w cnew %.4126, 0 +@logic_join.795 + %.4128 =w phi @for_join.793 %.4089, @logic_join.797 %.4127 + storew %.4128, 
%.3547 + %.4129 =l extsw 4 + %.4130 =l mul %.4129, 2 + %.4131 =l add %.4061, %.4130 + %.4132 =w loaduh %.4131 + %.4133 =w copy %.4132 + %.4134 =l extsw 0 + %.4135 =l mul %.4134, 2 + %.4136 =l add %.4061, %.4135 + %.4137 =w loaduh %.4136 + %.4138 =w copy %.4137 + %.4139 =w call $safe_mul_func_int8_t_s_s(w %.4133, w %.4138) + %.4140 =w extsb %.4139 + %.4141 =w loadsh %.3560 + %.4142 =w extsh %.4141 + %.4143 =w cnew %.4140, %.4142 + %.4144 =l extsw %.4143 + %.4145 =w csgtl %.4144, 621071666104868882 + %.4146 =l extsw %.4145 + %.4147 =l copy $g_265 + %.4148 =l mul 24, 1 + %.4149 =l add %.4147, %.4148 + %.4150 =l copy %.4149 + %.4151 =l loadl %.4150 + %.4152 =l call $safe_add_func_uint64_t_u_u(l %.4146, l %.4151) + %.4153 =w loaduh %.3565 + %.4154 =w copy %.4153 + storeh %.4154, %.261 +@if_join.789 +@for_cont.756 + %.4155 =l loadl $g_1604 + %.4156 =l extsw 1 + %.4157 =l add %.4155, %.4156 + storel %.4157, $g_1604 + jmp @for_cond.754 +@for_join.757 + storew 4, %.178 +@for_cond.802 + %.4158 =w loadsw %.178 + %.4159 =w csgew %.4158, 0 + jnz %.4159, @for_body.803, @for_join.805 +@for_body.803 + %.4161 =l add %.4160, 0 + %.4162 =l extsw 2 + %.4163 =l mul %.4162, 72 + %.4164 =l add $g_1616, %.4163 + %.4165 =l extsw 5 + %.4166 =l mul %.4165, 8 + %.4167 =l add %.4164, %.4166 + storel %.4167, %.4161 + %.4168 =l extsw 7 + %.4169 =l mul %.4168, 8 + %.4170 =l add %.2067, %.4169 + %.4171 =l loadl %.4170 + %.4172 =l loadl %.4160 + storel %.4171, %.4172 +@for_cont.804 + %.4173 =w loadsw %.178 + %.4174 =w sub %.4173, 1 + storew %.4174, %.178 + jmp @for_cond.802 +@for_join.805 + %.4175 =w copy 0 + storeb %.4175, $g_629 +@for_cond.806 + %.4176 =w loadsb $g_629 + %.4177 =w extsb %.4176 + %.4178 =w cslew %.4177, 7 + jnz %.4178, @for_body.807, @for_join.809 +@for_body.807 + %.4180 =l add %.4179, 0 + %.4181 =l extsw 0 + %.4182 =l copy %.4181 + storel %.4182, %.4180 + %.4184 =l add %.4183, 0 + %.4185 =l copy 2 + storel %.4185, %.4184 + %.4187 =l add %.4186, 0 + %.4188 =l extsw 0 + %.4189 =l 
sub %.4188, 1 + %.4190 =w copy %.4189 + storew %.4190, %.4187 + %.4192 =l add %.4191, 0 + %.4193 =w copy 734174619 + storew %.4193, %.4192 + %.4195 =l add %.4194, 0 + %.4196 =w copy 9 + storew %.4196, %.4195 + %.4198 =l add %.4197, 0 + %.4199 =l extsw 0 + %.4200 =l sub %.4199, 2 + %.4201 =w copy %.4200 + storew %.4201, %.4198 + %.4203 =l add %.4202, 0 + %.4204 =w copy 0 + storew %.4204, %.4203 + %.4206 =l add %.4205, 0 + %.4207 =l extsw 0 + %.4208 =l sub %.4207, 2 + %.4209 =w copy %.4208 + storew %.4209, %.4206 + %.4210 =l add %.4205, 4 + %.4211 =w copy 5 + storew %.4211, %.4210 + %.4212 =l add %.4205, 8 + %.4213 =l extsw 0 + %.4214 =l sub %.4213, 2 + %.4215 =w copy %.4214 + storew %.4215, %.4212 + %.4216 =l add %.4205, 12 + %.4217 =l extsw 0 + %.4218 =l sub %.4217, 2 + %.4219 =w copy %.4218 + storew %.4219, %.4216 + %.4220 =l add %.4205, 16 + %.4221 =w copy 5 + storew %.4221, %.4220 + %.4222 =l add %.4205, 20 + %.4223 =w copy 5 + storew %.4223, %.4222 + %.4224 =l add %.4205, 24 + %.4225 =w copy 3043948438 + storew %.4225, %.4224 + %.4226 =l add %.4205, 28 + %.4227 =w copy 5 + storew %.4227, %.4226 + %.4230 =w loaduw %.61 + %.4231 =w sub %.4230, 1 + storew %.4231, %.61 + %.4232 =l copy $g_130 + %.4233 =l mul 0, 1 + %.4234 =l add %.4232, %.4233 + %.4235 =l copy %.4234 + %.4236 =w loadsw %.4235 + %.4237 =w copy 8 + %.4238 =w call $safe_rshift_func_int8_t_s_s(w %.4237, w 7) + %.4239 =w extsb %.4238 + %.4240 =w or %.4236, %.4239 + %.4241 =w copy %.4240 + %.4242 =l loadl $g_1752 + %.4243 =l loadl %.68 + %.4244 =w cnel %.4242, %.4243 + %.4245 =l extsw 0 + %.4246 =w cnel %.4245, $g_1269 + %.4247 =w xor %.4244, %.4246 + %.4248 =l loadl $g_1589 + %.4249 =l loadl %.4248 + %.4250 =w loaduh %.4249 + %.4251 =l loadl %.4179 + %.4252 =l copy $g_265 + %.4253 =l mul 32, 1 + %.4254 =l add %.4252, %.4253 + %.4255 =l copy %.4254 + %.4256 =w loaduw %.4255 + %.4257 =w copy %.4256 + %.4258 =w copy 246 + %.4259 =w call $safe_mul_func_int8_t_s_s(w %.4257, w %.4258) + %.4260 =w extsb %.4259 
+ %.4261 =l loadl %.1 + %.4262 =w loadsw %.4261 + %.4263 =w csgew %.4260, %.4262 + %.4264 =l call $func_8(w %.4263) + %.4265 =l extsw 5 + %.4266 =l mul %.4265, 4 + %.4267 =l add %.3474, %.4266 + %.4268 =l extsw 6 + %.4269 =l mul %.4268, 4 + %.4270 =l add %.3474, %.4269 + %.4271 =l call $func_4(l %.4264, l %.4267, l %.4270) + storel %.4271, %.1 + %.4272 =l loadl %.4179 + %.4273 =l call $func_4(l %.4251, l %.4271, l %.4272) + %.4274 =l loadl %.4179 + %.4275 =l loadl %.264 + %.4276 =l call $func_4(l %.4273, l %.4274, l %.4275) + %.4277 =l loadl %.4179 + %.4278 =l extsw 6 + %.4279 =l mul %.4278, 4 + %.4280 =l add %.3474, %.4279 + %.4281 =l call $func_4(l %.4276, l %.4277, l %.4280) + %.4282 =l loadl %.4179 + %.4283 =l extsw 5 + %.4284 =l mul %.4283, 4 + %.4285 =l add %.3474, %.4284 + %.4286 =l call $func_4(l %.4281, l %.4282, l %.4285) + %.4287 =l loadl %.3455 + %.4288 =l loadl %.4287 + %.4289 =l loadl %.4288 + %.4290 =w ceql %.4286, %.4289 + %.4291 =l extsw %.4290 + %.4292 =l or %.4291, 12837 + %.4293 =w cnel %.4292, 0 + jnz %.4293, @logic_right.810, @logic_join.811 +@logic_right.810 + %.4294 =l loadl %.167 + %.4295 =w loadsw %.4294 + %.4296 =w cnew %.4295, 0 +@logic_join.811 + %.4297 =w phi @for_body.807 %.4293, @logic_right.810 %.4296 + %.4298 =w copy %.4297 + %.4299 =l loadl $g_1313 + %.4300 =l loadl %.4299 + %.4301 =l loadl %.4300 + %.4302 =l loadl %.4301 + %.4303 =w loaduw %.4302 + %.4304 =w cultw %.4298, %.4303 + %.4305 =w copy %.4304 + %.4306 =w call $safe_sub_func_uint8_t_u_u(w %.4241, w %.4305) + %.4307 =l extub %.4306 + %.4308 =l loadl %.4183 + %.4309 =l call $safe_mod_func_uint64_t_u_u(l %.4307, l %.4308) + %.4310 =w copy %.4309 + %.4311 =w copy 0 + %.4312 =w call $safe_add_func_uint16_t_u_u(w %.4310, w %.4311) + %.4313 =w extuh %.4312 + %.4314 =l loadl %.167 + storew %.4313, %.4314 + %.4315 =l copy $g_518 + %.4316 =l mul 48, 1 + %.4317 =l add %.4315, %.4316 + %.4318 =l copy %.4317 + storew 0, %.4318 +@for_cond.812 + %.4319 =l copy $g_518 + %.4320 =l mul 
48, 1 + %.4321 =l add %.4319, %.4320 + %.4322 =l copy %.4321 + %.4323 =w loadsw %.4322 + %.4324 =w cslew %.4323, 0 + jnz %.4324, @for_body.813, @for_join.815 +@for_body.813 + %.4326 =w loadsw %.3475 + %.4327 =l copy $g_518 + %.4328 =l mul 48, 1 + %.4329 =l add %.4327, %.4328 + %.4330 =l copy %.4329 + %.4331 =w loadsw %.4330 + %.4332 =l extsw %.4331 + %.4333 =l mul %.4332, 4 + %.4334 =l add %.184, %.4333 + storew %.4326, %.4334 + %.4335 =l loadl %.1 + storew %.4326, %.4335 + %.4336 =l loadl %.4183 + %.4337 =l copy %.4336 + ret %.4337 +@for_cont.814 + %.4338 =l copy $g_518 + %.4339 =l mul 48, 1 + %.4340 =l add %.4338, %.4339 + %.4341 =l copy %.4340 + %.4342 =w loadsw %.4341 + %.4343 =w add %.4342, 1 + storew %.4343, %.4341 + jmp @for_cond.812 +@for_join.815 + %.4344 =l extsw 2 + %.4345 =l mul %.4344, 24 + %.4346 =l add %.2076, %.4345 + %.4347 =l extsw 4 + %.4348 =l mul %.4347, 4 + %.4349 =l add %.4346, %.4348 + %.4350 =w loaduw %.4349 + %.4351 =w add %.4350, 1 + storew %.4351, %.4349 +@for_cont.808 + %.4352 =w loadsb $g_629 + %.4353 =w extsb %.4352 + %.4354 =w add %.4353, 1 + %.4355 =w copy %.4354 + storeb %.4355, $g_629 + jmp @for_cond.806 +@for_join.809 +@if_join.749 +@for_cont.726 + %.4356 =w loadsb $g_2 + %.4357 =w extsb %.4356 + %.4358 =w add %.4357, 1 + %.4359 =w copy %.4358 + storeb %.4359, $g_2 + jmp @for_cond.724 +@for_join.727 + %.4360 =w loaduw %.310 + %.4361 =w add %.4360, 1 + storew %.4361, %.310 + %.4362 =l loadl %.317 + %.4363 =l loadl %.4362 + %.4364 =l loadl $g_1590 + %.4365 =w loaduh %.4364 + %.4366 =l loadl $g_1589 + %.4367 =l loadl %.4366 + %.4368 =w loaduh %.4367 + %.4369 =w call $safe_mod_func_uint16_t_u_u(w %.4365, w %.4368) + %.4370 =l extuh %.4369 + %.4371 =l and %.4363, %.4370 + storel %.4371, %.4362 + %.4372 =l loadl %.323 + storel %.4371, %.4372 + %.4373 =l loadl $g_1038 + %.4374 =l loadl %.4373 + %.4375 =w loaduw %.4374 + %.4376 =w cnew %.4375, 0 + jnz %.4376, @logic_join.817, @logic_right.816 +@logic_right.816 + %.4377 =l loadl $g_422 + 
%.4378 =w loaduw %.4377 + %.4379 =l loadl $g_422 + storew %.4378, %.4379 + %.4380 =w cnew %.4378, 0 +@logic_join.817 + %.4381 =w phi @for_join.727 %.4376, @logic_right.816 %.4380 + %.4382 =w copy %.4381 + %.4383 =w call $safe_unary_minus_func_int8_t_s(w %.4382) + %.4384 =w extsb %.4383 + %.4385 =l loadl $g_173 + storew %.4384, %.4385 + %.4386 =w copy 0 + %.4387 =l copy $g_518 + %.4388 =l mul 36, 1 + %.4389 =l add %.4387, %.4388 + %.4390 =l copy %.4389 + storew %.4386, %.4390 +@for_cond.818 + %.4391 =l copy $g_518 + %.4392 =l mul 36, 1 + %.4393 =l add %.4391, %.4392 + %.4394 =l copy %.4393 + %.4395 =w loaduw %.4394 + %.4396 =w copy 1 + %.4397 =w culew %.4395, %.4396 + jnz %.4397, @for_body.819, @for_join.821 +@for_body.819 + %.4399 =l add %.4398, 0 + %.4400 =w copy 48818 + storeh %.4400, %.4399 + %.4402 =l add %.4401, 0 + %.4403 =w copy 2698380460 + storew %.4403, %.4402 + %.4404 =l add %.4401, 4 + %.4405 =w copy 18446744073709551609 + storew %.4405, %.4404 + %.4406 =l add %.4401, 8 + %.4407 =l extsw 0 + %.4408 =l sub %.4407, 1 + %.4409 =w copy %.4408 + storeh %.4409, %.4406 + %.4410 =l add %.4401, 10 + storeh 0, %.4410 + %.4411 =l add %.4401, 12 + %.4412 =w copy 2875702494 + storew %.4412, %.4411 + %.4413 =l add %.4401, 16 + %.4414 =w copy 0 + storew %.4414, %.4413 + %.4415 =l add %.4401, 20 + %.4416 =w copy 2698380460 + storew %.4416, %.4415 + %.4417 =l add %.4401, 24 + %.4418 =w copy 18446744073709551609 + storew %.4418, %.4417 + %.4419 =l add %.4401, 28 + %.4420 =l extsw 0 + %.4421 =l sub %.4420, 1 + %.4422 =w copy %.4421 + storeh %.4422, %.4419 + %.4423 =l add %.4401, 30 + storeh 0, %.4423 + %.4424 =l add %.4401, 32 + %.4425 =w copy 2875702494 + storew %.4425, %.4424 + %.4426 =l add %.4401, 36 + %.4427 =w copy 0 + storew %.4427, %.4426 + %.4428 =l add %.4401, 40 + %.4429 =w copy 2698380460 + storew %.4429, %.4428 + %.4430 =l add %.4401, 44 + %.4431 =w copy 18446744073709551609 + storew %.4431, %.4430 + %.4432 =l add %.4401, 48 + %.4433 =l extsw 0 + %.4434 =l 
sub %.4433, 1 + %.4435 =w copy %.4434 + storeh %.4435, %.4432 + %.4436 =l add %.4401, 50 + storeh 0, %.4436 + %.4437 =l add %.4401, 52 + %.4438 =w copy 2875702494 + storew %.4438, %.4437 + %.4439 =l add %.4401, 56 + %.4440 =w copy 0 + storew %.4440, %.4439 + %.4441 =l add %.4401, 60 + %.4442 =w copy 2698380460 + storew %.4442, %.4441 + %.4443 =l add %.4401, 64 + %.4444 =w copy 18446744073709551609 + storew %.4444, %.4443 + %.4445 =l add %.4401, 68 + %.4446 =l extsw 0 + %.4447 =l sub %.4446, 1 + %.4448 =w copy %.4447 + storeh %.4448, %.4445 + %.4449 =l add %.4401, 70 + storeh 0, %.4449 + %.4450 =l add %.4401, 72 + %.4451 =w copy 2875702494 + storew %.4451, %.4450 + %.4452 =l add %.4401, 76 + %.4453 =w copy 0 + storew %.4453, %.4452 + %.4454 =l add %.4401, 80 + %.4455 =w copy 2698380460 + storew %.4455, %.4454 + %.4456 =l add %.4401, 84 + %.4457 =w copy 18446744073709551609 + storew %.4457, %.4456 + %.4458 =l add %.4401, 88 + %.4459 =l extsw 0 + %.4460 =l sub %.4459, 1 + %.4461 =w copy %.4460 + storeh %.4461, %.4458 + %.4462 =l add %.4401, 90 + storeh 0, %.4462 + %.4463 =l add %.4401, 92 + %.4464 =w copy 2875702494 + storew %.4464, %.4463 + %.4465 =l add %.4401, 96 + %.4466 =w copy 0 + storew %.4466, %.4465 + %.4467 =l add %.4401, 100 + %.4468 =w copy 2698380460 + storew %.4468, %.4467 + %.4469 =l add %.4401, 104 + %.4470 =w copy 18446744073709551609 + storew %.4470, %.4469 + %.4471 =l add %.4401, 108 + %.4472 =l extsw 0 + %.4473 =l sub %.4472, 1 + %.4474 =w copy %.4473 + storeh %.4474, %.4471 + %.4475 =l add %.4401, 110 + storeh 0, %.4475 + %.4476 =l add %.4401, 112 + %.4477 =w copy 2875702494 + storew %.4477, %.4476 + %.4478 =l add %.4401, 116 + %.4479 =w copy 0 + storew %.4479, %.4478 + %.4481 =l add %.4480, 0 + %.4482 =l extsw 3 + %.4483 =l mul %.4482, 448 + %.4484 =l add %.325, %.4483 + %.4485 =l extsw 0 + %.4486 =l mul %.4485, 56 + %.4487 =l add %.4484, %.4486 + %.4488 =l copy %.4487 + %.4489 =l mul 44, 1 + %.4490 =l add %.4488, %.4489 + %.4491 =l copy %.4490 + 
storel %.4491, %.4481 + %.4493 =l add %.4492, 0 + %.4494 =w copy 6 + storew %.4494, %.4493 + %.4496 =l add %.4495, 0 + %.4497 =w copy 3016449401 + storew %.4497, %.4496 + %.4499 =w copy 0 + %.4500 =l copy $g_130 + %.4501 =l mul 16, 1 + %.4502 =l add %.4500, %.4501 + %.4503 =l copy %.4502 + storew %.4499, %.4503 +@for_cond.822 + %.4504 =l copy $g_130 + %.4505 =l mul 16, 1 + %.4506 =l add %.4504, %.4505 + %.4507 =l copy %.4506 + %.4508 =w loaduw %.4507 + %.4509 =w copy 1 + %.4510 =w culew %.4508, %.4509 + jnz %.4510, @for_body.823, @for_join.825 +@for_body.823 + %.4513 =l add %.4512, 0 + %.4514 =w copy 1 + storeb %.4514, %.4513 + %.4515 =l add %.4512, 1 + storeb 0, %.4515 + %.4516 =l add %.4512, 2 + storeh 0, %.4516 + %.4517 =l add %.4512, 4 + storew 0, %.4517 + %.4518 =l add %.4512, 8 + %.4519 =l extsw 0 + %.4520 =l sub %.4519, 6 + %.4521 =l copy %.4520 + storel %.4521, %.4518 + %.4522 =l add %.4512, 16 + %.4523 =w copy 7 + storew %.4523, %.4522 + %.4524 =l add %.4512, 20 + storew 0, %.4524 + %.4525 =l add %.4512, 24 + %.4526 =l copy 1 + storel %.4526, %.4525 + %.4527 =l add %.4512, 32 + %.4528 =w copy 4294967295 + storew %.4528, %.4527 + %.4529 =l add %.4512, 36 + %.4530 =w copy 1 + storew %.4530, %.4529 + %.4531 =l add %.4512, 40 + %.4532 =w copy 2429467455 + storew %.4532, %.4531 + %.4533 =l add %.4512, 44 + %.4534 =w copy 762222995 + storew %.4534, %.4533 + %.4535 =l add %.4512, 48 + %.4536 =l extsw 0 + %.4537 =l sub %.4536, 1 + %.4538 =w copy %.4537 + storew %.4538, %.4535 + %.4539 =l add %.4512, 52 + storew 0, %.4539 + %.4541 =l add %.4540, 0 + %.4542 =l extsw 0 + %.4543 =l copy %.4542 + storel %.4543, %.4541 + %.4545 =l add %.4544, 0 + %.4546 =w copy 1 + storew %.4546, %.4545 + %.4548 =l add %.4547, 0 + %.4549 =l copy $g_1183 + %.4550 =l mul 48, 1 + %.4551 =l add %.4549, %.4550 + %.4552 =l copy %.4551 + storel %.4552, %.4548 + %.4554 =l add %.4553, 0 + %.4555 =w copy 9 + storew %.4555, %.4554 + %.4557 =l add %.4556, 0 + storel $g_662, %.4557 + %.4559 =l add 
%.4558, 0 + %.4560 =w copy 921221594 + storew %.4560, %.4559 + storew 0, %.4561 +@for_cond.826 + %.4562 =w loadsw %.4561 + %.4563 =w csltw %.4562, 1 + jnz %.4563, @for_body.827, @for_join.829 +@for_body.827 + %.4564 =l copy 7934066739426349945 + %.4565 =w loadsw %.4561 + %.4566 =l extsw %.4565 + %.4567 =l mul %.4566, 8 + %.4568 =l add %.4511, %.4567 + storel %.4564, %.4568 +@for_cont.828 + %.4569 =w loadsw %.4561 + %.4570 =w add %.4569, 1 + storew %.4570, %.4561 + jmp @for_cond.826 +@for_join.829 +@for_cont.824 + %.4571 =l copy $g_130 + %.4572 =l mul 16, 1 + %.4573 =l add %.4571, %.4572 + %.4574 =l copy %.4573 + %.4575 =w loaduw %.4574 + %.4576 =w copy 1 + %.4577 =w add %.4575, %.4576 + storew %.4577, %.4574 + jmp @for_cond.822 +@for_join.825 + %.4578 =l loadl %.4480 + %.4579 =w loadsw %.4578 + %.4580 =w cnew %.4579, 0 + jnz %.4580, @if_true.830, @if_false.831 +@if_true.830 + jmp @for_cont.820 +@if_false.831 + %.4581 =l extsw 1 + storel %.4581, $g_80 +@for_cond.832 + %.4582 =l loadl $g_80 + %.4583 =l extsw 0 + %.4584 =w csgel %.4582, %.4583 + jnz %.4584, @for_body.833, @for_join.835 +@for_body.833 + %.4586 =l add %.4585, 0 + %.4587 =w copy 255 + storeb %.4587, %.4586 + %.4589 =l add %.4588, 0 + %.4590 =w copy 1 + storew %.4590, %.4589 + %.4592 =l add %.4591, 0 + %.4593 =l extsw 4 + %.4594 =l mul %.4593, 1 + %.4595 =l add $g_132, %.4594 + storel %.4595, %.4592 + %.4597 =l add %.4596, 0 + storel %.4591, %.4597 + %.4598 =w loaduw %.4492 + %.4599 =w sub %.4598, 1 + storew %.4599, %.4492 + %.4600 =l copy $g_1183 + %.4601 =l mul 48, 1 + %.4602 =l add %.4600, %.4601 + %.4603 =l copy %.4602 + storew 0, %.4603 +@for_cond.836 + %.4604 =l copy $g_1183 + %.4605 =l mul 48, 1 + %.4606 =l add %.4604, %.4605 + %.4607 =l copy %.4606 + %.4608 =w loadsw %.4607 + %.4609 =w cslew %.4608, 1 + jnz %.4609, @for_body.837, @for_join.839 +@for_body.837 + %.4611 =l add %.4610, 0 + %.4612 =l extsw 0 + %.4613 =l copy %.4612 + storel %.4613, %.4611 + %.4615 =l add %.4614, 0 + %.4616 =w copy 
2834361667 + storew %.4616, %.4615 + %.4618 =l add %.4617, 0 + %.4619 =w copy 1313316793 + storew %.4619, %.4618 + %.4620 =l add %.4617, 4 + %.4621 =w copy 3377634704 + storew %.4621, %.4620 + %.4622 =l add %.4617, 8 + %.4623 =w copy 37131 + storeh %.4623, %.4622 + %.4624 =l add %.4617, 10 + storeh 0, %.4624 + %.4625 =l add %.4617, 12 + %.4626 =w copy 3716013692 + storew %.4626, %.4625 + %.4627 =l add %.4617, 16 + %.4628 =w copy 170244838 + storew %.4628, %.4627 + %.4630 =l add %.4629, 0 + %.4631 =w copy 0 + storeb %.4631, %.4630 + %.4632 =l extsw 0 + %.4633 =l copy $g_518 + %.4634 =l mul 24, 1 + %.4635 =l add %.4633, %.4634 + %.4636 =l copy %.4635 + storel %.4632, %.4636 +@for_cond.840 + %.4637 =l copy $g_518 + %.4638 =l mul 24, 1 + %.4639 =l add %.4637, %.4638 + %.4640 =l copy %.4639 + %.4641 =l loadl %.4640 + %.4642 =l extsw 9 + %.4643 =w cultl %.4641, %.4642 + jnz %.4643, @for_body.841, @for_join.843 +@for_body.841 + %.4644 =w copy 0 + %.4645 =l copy $g_794 + %.4646 =l mul 8, 1 + %.4647 =l add %.4645, %.4646 + %.4648 =l copy %.4647 + storeh %.4644, %.4648 +@for_cond.844 + %.4649 =l copy $g_794 + %.4650 =l mul 8, 1 + %.4651 =l add %.4649, %.4650 + %.4652 =l copy %.4651 + %.4653 =w loadsh %.4652 + %.4654 =w extsh %.4653 + %.4655 =w csltw %.4654, 3 + jnz %.4655, @for_body.845, @for_join.847 +@for_body.845 + %.4656 =l copy $g_265 + %.4657 =l mul 48, 1 + %.4658 =l add %.4656, %.4657 + %.4659 =l copy %.4658 + storew 0, %.4659 +@for_cond.848 + %.4660 =l copy $g_265 + %.4661 =l mul 48, 1 + %.4662 =l add %.4660, %.4661 + %.4663 =l copy %.4662 + %.4664 =w loadsw %.4663 + %.4665 =w csltw %.4664, 1 + jnz %.4665, @for_body.849, @for_join.851 +@for_body.849 + %.4666 =l extsw 0 + %.4667 =l sub %.4666, 4 + %.4668 =w copy %.4667 + %.4669 =l copy $g_518 + %.4670 =l mul 24, 1 + %.4671 =l add %.4669, %.4670 + %.4672 =l copy %.4671 + %.4673 =l loadl %.4672 + %.4674 =l copy %.4673 + %.4675 =l mul %.4674, 12 + %.4676 =l add $g_13, %.4675 + %.4677 =l copy $g_794 + %.4678 =l mul 8, 1 + 
%.4679 =l add %.4677, %.4678 + %.4680 =l copy %.4679 + %.4681 =w loadsh %.4680 + %.4682 =l extsh %.4681 + %.4683 =l mul %.4682, 4 + %.4684 =l add %.4676, %.4683 + %.4685 =l copy $g_265 + %.4686 =l mul 48, 1 + %.4687 =l add %.4685, %.4686 + %.4688 =l copy %.4687 + %.4689 =w loadsw %.4688 + %.4690 =l extsw %.4689 + %.4691 =l mul %.4690, 4 + %.4692 =l add %.4684, %.4691 + storew %.4668, %.4692 +@for_cont.850 + %.4693 =l copy $g_265 + %.4694 =l mul 48, 1 + %.4695 =l add %.4693, %.4694 + %.4696 =l copy %.4695 + %.4697 =w loadsw %.4696 + %.4698 =w add %.4697, 1 + storew %.4698, %.4696 + jmp @for_cond.848 +@for_join.851 +@for_cont.846 + %.4699 =l copy $g_794 + %.4700 =l mul 8, 1 + %.4701 =l add %.4699, %.4700 + %.4702 =l copy %.4701 + %.4703 =w loadsh %.4702 + %.4704 =w extsh %.4703 + %.4705 =w add %.4704, 1 + %.4706 =w copy %.4705 + storeh %.4706, %.4702 + jmp @for_cond.844 +@for_join.847 +@for_cont.842 + %.4707 =l copy $g_518 + %.4708 =l mul 24, 1 + %.4709 =l add %.4707, %.4708 + %.4710 =l copy %.4709 + %.4711 =l loadl %.4710 + %.4712 =l extsw 1 + %.4713 =l add %.4711, %.4712 + storel %.4713, %.4710 + jmp @for_cond.840 +@for_join.843 + %.4714 =l loadl %.1 + %.4715 =w loadsw %.4714 + %.4716 =w cnew %.4715, 0 + jnz %.4716, @logic_join.853, @logic_right.852 +@logic_right.852 + %.4717 =w copy 59312 + %.4718 =w call $safe_unary_minus_func_int16_t_s(w %.4717) + %.4719 =w extsh %.4718 + storew %.4719, %.4614 + %.4720 =w copy %.4719 + %.4721 =l loadl %.4480 + %.4722 =w loadsw %.4721 + %.4723 =w copy %.4722 + %.4724 =w call $safe_mul_func_int8_t_s_s(w %.4720, w %.4723) + %.4725 =w extsb %.4724 + %.4726 =l copy $g_1183 + %.4727 =l mul 8, 1 + %.4728 =l add %.4726, %.4727 + %.4729 =l copy %.4728 + %.4730 =l loadl %.4729 + %.4731 =l or 3025800570176797084, %.4730 + %.4732 =w copy %.4731 + %.4733 =w call $safe_lshift_func_uint16_t_u_s(w %.4732, w 8) + %.4734 =w extuh %.4733 + %.4735 =w cnew %.4725, %.4734 + %.4736 =w cnew %.4735, 0 +@logic_join.853 + %.4737 =w phi @for_join.843 
%.4716, @logic_right.852 %.4736 + %.4738 =l loadl %.266 + %.4739 =w loadsw %.4738 + %.4740 =w loadsw %.4614 + %.4741 =w copy %.4740 + %.4742 =l loadl %.278 + %.4743 =w loadsw %.4742 + %.4744 =w copy %.4743 + %.4745 =w call $safe_unary_minus_func_int16_t_s(w %.4744) + %.4746 =w copy %.4745 + %.4747 =w call $safe_add_func_int8_t_s_s(w %.4741, w %.4746) + %.4748 =l extsb %.4747 + %.4749 =l loadl $g_1589 + %.4750 =l loadl %.4749 + %.4751 =w loaduh %.4750 + %.4752 =l extuh %.4751 + %.4753 =w csgtl %.4752, 35293 + %.4754 =l loadl %.4480 + storew %.4753, %.4754 + %.4755 =l extsw %.4753 + %.4756 =l call $safe_add_func_int64_t_s_s(l %.4748, l %.4755) + %.4757 =w copy %.4756 + %.4758 =w loadsw %.4614 + %.4759 =w copy %.4758 + %.4760 =w call $safe_rshift_func_int8_t_s_u(w %.4757, w %.4759) + %.4761 =w extsb %.4760 + %.4762 =w csgtw %.4739, %.4761 + %.4763 =w loadsh %.1645 + %.4764 =w extsh %.4763 + %.4765 =w call $safe_div_func_int32_t_s_s(w %.4762, w %.4764) + %.4766 =w loadub %.4585 + %.4767 =w extub %.4766 + %.4768 =w cnew %.4767, 0 + jnz %.4768, @if_true.854, @if_false.855 +@if_true.854 + %.4770 =l add %.4769, 0 + %.4771 =w copy 13655 + storeh %.4771, %.4770 + %.4774 =l add %.4773, 0 + %.4775 =l extsw 4 + %.4776 =l mul %.4775, 1 + %.4777 =l add $g_132, %.4776 + storel %.4777, %.4774 + %.4779 =l add %.4778, 0 + %.4780 =l extsw 0 + %.4781 =l copy %.4780 + storel %.4781, %.4779 + %.4783 =l add %.4782, 0 + storel $g_81, %.4783 + %.4785 =l add %.4784, 0 + %.4786 =l extsw 0 + %.4787 =l mul %.4786, 4 + %.4788 =l add %.184, %.4787 + storel %.4788, %.4785 + storew 0, %.4789 +@for_cond.856 + %.4790 =w loadsw %.4789 + %.4791 =w csltw %.4790, 3 + jnz %.4791, @for_body.857, @for_join.859 +@for_body.857 + %.4792 =l extsw 0 + %.4793 =l copy %.4792 + %.4794 =w loadsw %.4789 + %.4795 =l extsw %.4794 + %.4796 =l mul %.4795, 8 + %.4797 =l add %.4772, %.4796 + storel %.4793, %.4797 +@for_cont.858 + %.4798 =w loadsw %.4789 + %.4799 =w add %.4798, 1 + storew %.4799, %.4789 + jmp @for_cond.856 
+@for_join.859 + %.4800 =l loadl %.167 + %.4801 =w loadsw %.4800 + %.4802 =l extsw 0 + %.4803 =l sub %.4802, 1 + %.4804 =w cnel %.4803, 0 + jnz %.4804, @logic_right.860, @logic_join.861 +@logic_right.860 + %.4805 =w loaduh %.4769 + %.4806 =w extuh %.4805 + %.4807 =w cnew %.4806, 0 +@logic_join.861 + %.4808 =w phi @for_join.859 %.4804, @logic_right.860 %.4807 + %.4809 =l extsw %.4808 + %.4810 =l loadl %.306 + %.4811 =w loadsw %.4810 + %.4812 =l loadl $g_296 + %.4813 =l loadl %.4812 + %.4814 =l loadl %.4773 + %.4815 =w loadsb %.4814 + %.4816 =w extsb %.4815 + %.4817 =w loaduh %.4769 + %.4818 =w extuh %.4817 + %.4819 =l loadl %.4480 + %.4820 =w loadsw %.4819 + %.4821 =w or %.4818, %.4820 + %.4822 =l extsw 3 + %.4823 =l mul %.4822, 400 + %.4824 =l add %.1648, %.4823 + %.4825 =l extsw 7 + %.4826 =l mul %.4825, 40 + %.4827 =l add %.4824, %.4826 + %.4828 =l extsw 1 + %.4829 =l mul %.4828, 8 + %.4830 =l add %.4827, %.4829 + %.4831 =l loadl %.4830 + %.4832 =w loadsh $g_1922 + %.4833 =l extsh %.4832 + %.4834 =w cugel %.4831, %.4833 + %.4835 =l extsw 0 + %.4836 =l sub %.4835, 9 + %.4837 =l extsw 0 + %.4838 =l mul %.4837, 8 + %.4839 =l add %.4772, %.4838 + %.4840 =l loadl %.4839 + %.4841 =l loadl %.83 + %.4842 =w ceql %.4840, %.4841 + %.4843 =l extsw %.4842 + %.4844 =w cugel 255, %.4843 + %.4845 =w cslew %.4821, %.4844 + %.4846 =w copy %.4845 + %.4847 =l copy %.4617 + %.4848 =l mul 4, 1 + %.4849 =l add %.4847, %.4848 + %.4850 =l copy %.4849 + %.4851 =w loaduw %.4850 + %.4852 =w cugew %.4846, %.4851 + %.4853 =w copy %.4852 + %.4854 =l loadl $g_1590 + %.4855 =w loaduh %.4854 + %.4856 =w extuh %.4855 + %.4857 =w call $safe_rshift_func_uint16_t_u_u(w %.4853, w %.4856) + %.4858 =w extuh %.4857 + %.4859 =w loadub %.4585 + %.4860 =w extub %.4859 + %.4861 =w or %.4858, %.4860 + %.4862 =l loadl $g_1589 + %.4863 =l loadl %.4862 + %.4864 =w loaduh %.4863 + %.4865 =w extuh %.4864 + %.4866 =w csgew %.4861, %.4865 + %.4867 =w xor %.4816, %.4866 + %.4868 =w copy %.4867 + storeb %.4868, 
%.4814 + %.4869 =w extsb %.4868 + %.4870 =l loadl %.289 + %.4871 =w loadsw %.4870 + %.4872 =w cnew %.4869, %.4871 + %.4873 =w copy %.4872 + %.4874 =l loadl %.4782 + storeh %.4873, %.4874 + %.4875 =w extsh %.4873 + %.4876 =w cnew %.4875, 0 + jnz %.4876, @logic_right.864, @logic_join.865 +@logic_right.864 + %.4877 =l loadl %.4480 + %.4878 =w loadsw %.4877 + %.4879 =w cnew %.4878, 0 +@logic_join.865 + %.4880 =w phi @logic_join.861 %.4876, @logic_right.864 %.4879 + %.4881 =w copy %.4880 + %.4882 =w copy 39728 + %.4883 =w call $safe_add_func_uint16_t_u_u(w %.4881, w %.4882) + %.4884 =w extuh %.4883 + %.4885 =l extsw %.4884 + %.4886 =w cugtl %.4885, 248 + %.4887 =w copy %.4886 + %.4888 =l loadl $g_1038 + %.4889 =l loadl %.4888 + %.4890 =w loaduw %.4889 + %.4891 =w call $safe_add_func_uint32_t_u_u(w %.4887, w %.4890) + %.4892 =w copy %.4891 + storeb %.4892, %.4629 + %.4893 =l extsb %.4892 + %.4894 =l call $safe_div_func_int64_t_s_s(l %.4893, l 7921571751143292974) + %.4895 =l loadl %.4480 + %.4896 =w loadsw %.4895 + %.4897 =l extsw %.4896 + %.4898 =w csgel %.4894, %.4897 + %.4899 =l loadl %.264 + %.4900 =w loadsw %.4899 + %.4901 =w copy %.4900 + %.4902 =w loadub %.4585 + %.4903 =w extub %.4902 + %.4904 =w call $safe_mod_func_int16_t_s_s(w %.4901, w %.4903) + %.4905 =l extsh %.4904 + %.4906 =l loadl %.4480 + %.4907 =w loadsw %.4906 + %.4908 =l extsw %.4907 + %.4909 =l call $safe_mod_func_int64_t_s_s(l %.4905, l %.4908) + %.4910 =l copy 0 + %.4911 =l or %.4909, %.4910 + %.4912 =w copy %.4911 + %.4913 =w call $safe_lshift_func_uint16_t_u_s(w %.4912, w 6) + %.4914 =w extuh %.4913 + %.4915 =l loadl %.266 + storew %.4914, %.4915 + %.4916 =w ceqw %.4811, %.4914 + %.4917 =w copy %.4916 + %.4918 =w copy 91 + %.4919 =w call $safe_div_func_int8_t_s_s(w %.4917, w %.4918) + %.4920 =w extsb %.4919 + %.4921 =w cnew %.4920, 0 + jnz %.4921, @logic_right.862, @logic_join.863 +@logic_right.862 + %.4922 =w cnel 1635734213, 0 +@logic_join.863 + %.4923 =w phi @logic_join.865 %.4921, 
@logic_right.862 %.4922 + %.4924 =w copy %.4923 + %.4925 =l loadl %.278 + %.4926 =w loadsw %.4925 + %.4927 =w call $safe_rshift_func_uint16_t_u_s(w %.4924, w %.4926) + %.4928 =w loaduh %.4769 + %.4929 =w extuh %.4928 + %.4930 =w loadsw %.4614 + %.4931 =w cslew %.4929, %.4930 + %.4932 =w loadub %.4585 + %.4933 =w extub %.4932 + %.4934 =w or %.4931, %.4933 + %.4935 =w loaduh %.4769 + %.4936 =w extuh %.4935 + %.4937 =w cslew %.4934, %.4936 + %.4938 =w cnel %.4809, 6213 + %.4939 =w and %.4801, %.4938 + storew %.4939, %.4800 + %.4940 =l loadl %.4784 + %.4941 =l loadl %.4480 + %.4942 =w ceql %.4940, %.4941 + %.4943 =l loadl %.264 + storew %.4942, %.4943 + %.4944 =l loadl $g_1123 + %.4945 =l extsw 4 + %.4946 =l mul %.4945, 20 + %.4947 =l add %.4401, %.4946 + %.4948 =l loaduw %.4944 + storew %.4948, %.4947 + %.4949 =l add %.4944, 4 + %.4950 =l add %.4947, 4 + %.4951 =l loaduw %.4949 + storew %.4951, %.4950 + %.4952 =l add %.4949, 4 + %.4953 =l add %.4950, 4 + %.4954 =l loaduw %.4952 + storew %.4954, %.4953 + %.4955 =l add %.4952, 4 + %.4956 =l add %.4953, 4 + %.4957 =l loaduw %.4955 + storew %.4957, %.4956 + %.4958 =l add %.4955, 4 + %.4959 =l add %.4956, 4 + %.4960 =l loaduw %.4958 + storew %.4960, %.4959 + %.4961 =l add %.4958, 4 + %.4962 =l add %.4959, 4 + %.4963 =w loaduw %.4495 + %.4964 =w cnew %.4963, 0 + jnz %.4964, @if_true.866, @if_false.867 +@if_true.866 + jmp @for_join.839 +@if_false.867 + jmp @if_join.868 +@if_false.855 + %.4966 =l add %.4965, 0 + storel 7722482555776156576, %.4966 + %.4968 =l add %.4967, 0 + storel $g_1604, %.4968 + %.4970 =l add %.4969, 0 + storel %.4596, %.4970 + %.4971 =l loadl %.4480 + %.4972 =w loadsw %.4971 + %.4973 =l copy %.4617 + %.4974 =l mul 16, 1 + %.4975 =l add %.4973, %.4974 + %.4976 =l copy %.4975 + %.4977 =w loaduw %.4976 + %.4978 =w copy %.4977 + %.4979 =w call $safe_rshift_func_int16_t_s_s(w %.4978, w 15) + %.4980 =w extsh %.4979 + %.4981 =l loadl %.4965 + %.4982 =l loadl $g_1590 + %.4983 =w loaduh %.4982 + %.4984 =l extuh 
%.4983 + %.4985 =l xor %.4984, 5122 + %.4986 =w copy %.4985 + storeh %.4986, %.4982 + %.4987 =w extuh %.4986 + %.4988 =w xor %.4987, 18446744073709551615 + %.4989 =w loadsw %.4614 + %.4990 =l copy $g_185 + %.4991 =l mul 24, 1 + %.4992 =l add %.4990, %.4991 + %.4993 =l copy %.4992 + %.4994 =l loadl %.4993 + %.4995 =l copy 0 + %.4996 =l loadl %.4967 + storel %.4995, %.4996 + %.4997 =l loadl %.295 + %.4998 =w loadsw %.4997 + %.4999 =w copy %.4998 + %.5000 =w copy 65533 + %.5001 =l copy %.4617 + %.5002 =l mul 8, 1 + %.5003 =l add %.5001, %.5002 + %.5004 =l copy %.5003 + %.5005 =w loadsh %.5004 + %.5006 =w copy %.5005 + %.5007 =w call $safe_add_func_uint16_t_u_u(w %.5000, w %.5006) + %.5008 =w extuh %.5007 + %.5009 =w call $safe_rshift_func_uint8_t_u_u(w %.4999, w %.5008) + %.5010 =w extub %.5009 + %.5011 =w cnew %.5010, 0 + jnz %.5011, @logic_right.871, @logic_join.872 +@logic_right.871 + %.5012 =l extsw 0 + %.5013 =l sub %.5012, 9 + %.5014 =l copy %.5013 + %.5015 =l copy $g_1183 + %.5016 =l mul 24, 1 + %.5017 =l add %.5015, %.5016 + %.5018 =l copy %.5017 + %.5019 =l loadl %.5018 + %.5020 =w cugtl %.5014, %.5019 + %.5021 =w cnew %.5020, 0 +@logic_join.872 + %.5022 =w phi @if_false.855 %.5011, @logic_right.871 %.5021 + %.5023 =w copy %.5022 + %.5024 =l loadl $g_1038 + %.5025 =l loadl %.5024 + %.5026 =w loaduw %.5025 + %.5027 =w xor %.5023, %.5026 + %.5028 =l extuw %.5027 + %.5029 =l call $safe_sub_func_uint64_t_u_u(l %.4995, l %.5028) + %.5030 =l extsw 0 + %.5031 =l extsw 0 + %.5032 =w cnel %.5030, %.5031 + %.5033 =l extsw %.5032 + %.5034 =l or %.4994, %.5033 + storel %.5034, %.4993 + %.5035 =l copy $g_1183 + %.5036 =l mul 36, 1 + %.5037 =l add %.5035, %.5036 + %.5038 =l copy %.5037 + %.5039 =w loaduw %.5038 + %.5040 =l extuw %.5039 + %.5041 =l xor %.5034, %.5040 + %.5042 =w cnel %.5041, 0 + jnz %.5042, @logic_right.869, @logic_join.870 +@logic_right.869 + %.5043 =l copy %.4617 + %.5044 =l mul 0, 1 + %.5045 =l add %.5043, %.5044 + %.5046 =l copy %.5045 + %.5047 =w 
loadsw %.5046 + %.5048 =w cnew %.5047, 0 +@logic_join.870 + %.5049 =w phi @logic_join.872 %.5042, @logic_right.869 %.5048 + %.5050 =w call $safe_mod_func_int32_t_s_s(w %.4980, w %.5049) + %.5051 =w or %.4972, %.5050 + storew %.5051, %.4971 + %.5052 =w loaduw %.2010 + %.5053 =w copy %.5052 + %.5054 =l loadl %.4480 + storew %.5053, %.5054 + %.5055 =w loadsw %.4588 + %.5056 =l loadl $g_1269 + %.5057 =l loadl %.5056 + %.5058 =l loadl %.5057 + %.5059 =l loadl %.4480 + %.5060 =l copy $g_1183 + %.5061 =l mul 48, 1 + %.5062 =l add %.5060, %.5061 + %.5063 =l copy %.5062 + %.5064 =w loadsw %.5063 + %.5065 =w loadsw %.4614 + %.5066 =l loadl %.1 + %.5067 =w loadsw %.5066 + %.5068 =w xor %.5065, %.5067 + storew %.5068, %.4614 + %.5069 =w copy 5802 + %.5070 =l loadl $g_1590 + storeh %.5069, %.5070 + %.5071 =w extuh %.5069 + %.5072 =w cnew %.5071, 0 + jnz %.5072, @logic_join.874, @logic_right.873 +@logic_right.873 + %.5073 =l loadl $g_1038 + %.5074 =l loadl %.5073 + %.5075 =w loaduw %.5074 + %.5076 =w copy 3856688714 + %.5077 =w call $safe_div_func_uint32_t_u_u(w %.5075, w %.5076) + %.5078 =l extuw %.5077 + %.5079 =l loadl %.317 + %.5080 =l loadl %.5079 + %.5081 =l extsw 4 + %.5082 =l mul %.5081, 20 + %.5083 =l add %.4401, %.5082 + %.5084 =l extsw 1 + %.5085 =l mul %.5084, 448 + %.5086 =l add %.325, %.5085 + %.5087 =l extsw 3 + %.5088 =l mul %.5087, 56 + %.5089 =l add %.5086, %.5088 + %.5090 =l loadl $g_422 + %.5091 =w loaduw %.5090 + %.5092 =l extuw %.5091 + %.5093 =l or %.5092, 2844124571 + %.5094 =l loadl %.4965 + %.5095 =l loadl $g_1589 + %.5096 =l loadl %.5095 + %.5097 =w loaduh %.5096 + %.5098 =l extuh %.5097 + %.5099 =w csltl %.5094, %.5098 + %.5100 =l extsw %.5099 + %.5101 =l or %.5080, %.5100 + storel %.5101, %.5079 + %.5102 =w csgtl %.5078, %.5101 + %.5103 =l loadl %.4480 + %.5104 =w loadsw %.5103 + %.5105 =w cnew %.5104, 0 +@logic_join.874 + %.5106 =w phi @logic_join.870 %.5072, @logic_right.873 %.5105 + %.5107 =l extsw %.5106 + %.5108 =l loadl %.4965 + %.5109 =l copy 
%.5108 + %.5110 =l call $safe_mod_func_uint64_t_u_u(l %.5107, l %.5109) + %.5111 =l loadl %.4965 + %.5112 =l copy %.5111 + %.5113 =l or %.5110, %.5112 + %.5114 =l copy 3166269750 + %.5115 =l or %.5113, %.5114 + %.5116 =l loadl $g_23 + %.5117 =w loadsw %.5116 + %.5118 =l extsw %.5117 + %.5119 =w ceql %.5115, %.5118 + %.5120 =w cnew %.5064, %.5119 + %.5121 =l extsw 0 + %.5122 =w ceql %.5059, %.5121 + %.5123 =w xor %.5055, %.5122 + storew %.5123, %.4588 + %.5124 =l loadl %.4596 + %.5125 =l loadl %.4969 + storel %.5124, %.5125 +@if_join.868 + %.5126 =l copy $g_185 + %.5127 =l mul 40, 1 + %.5128 =l add %.5126, %.5127 + %.5129 =l copy %.5128 + storew 1, %.5129 +@for_cond.875 + %.5130 =l copy $g_185 + %.5131 =l mul 40, 1 + %.5132 =l add %.5130, %.5131 + %.5133 =l copy %.5132 + %.5134 =w loadsw %.5133 + %.5135 =w csgew %.5134, 0 + jnz %.5135, @for_body.876, @for_join.878 +@for_body.876 + %.5137 =l add %.5136, 0 + %.5138 =w copy 0 + storeb %.5138, %.5137 + %.5139 =l copy %.4617 + %.5140 =l mul 8, 1 + %.5141 =l add %.5139, %.5140 + %.5142 =l copy %.5141 + %.5143 =w loadsh %.5142 + %.5144 =w extsh %.5143 + %.5145 =l loadl $g_23 + storew %.5144, %.5145 + %.5146 =w loadub %.5136 + %.5147 =w sub %.5146, 1 + storeb %.5147, %.5136 +@for_cont.877 + %.5148 =l copy $g_185 + %.5149 =l mul 40, 1 + %.5150 =l add %.5148, %.5149 + %.5151 =l copy %.5150 + %.5152 =w loadsw %.5151 + %.5153 =w sub %.5152, 1 + storew %.5153, %.5151 + jmp @for_cond.875 +@for_join.878 + %.5154 =l copy %.4617 + %.5155 =l mul 12, 1 + %.5156 =l add %.5154, %.5155 + %.5157 =l copy %.5156 + %.5158 =w loadsw %.5157 + %.5159 =w cnew %.5158, 0 + jnz %.5159, @if_true.879, @if_false.880 +@if_true.879 + jmp @for_cont.838 +@if_false.880 +@for_cont.838 + %.5160 =l copy $g_1183 + %.5161 =l mul 48, 1 + %.5162 =l add %.5160, %.5161 + %.5163 =l copy %.5162 + %.5164 =w loadsw %.5163 + %.5165 =w add %.5164, 1 + storew %.5165, %.5163 + jmp @for_cond.836 +@for_join.839 +@for_cont.834 + %.5166 =l loadl $g_80 + %.5167 =l extsw 1 + 
%.5168 =l sub %.5166, %.5167 + storel %.5168, $g_80 + jmp @for_cond.832 +@for_join.835 +@for_cont.820 + %.5169 =l copy $g_518 + %.5170 =l mul 36, 1 + %.5171 =l add %.5169, %.5170 + %.5172 =l copy %.5171 + %.5173 =w loaduw %.5172 + %.5174 =w copy 1 + %.5175 =w add %.5173, %.5174 + storew %.5175, %.5172 + jmp @for_cond.818 +@for_join.821 + jmp @if_join.881 +@if_false.711 + %.5177 =l add %.5176, 0 + %.5178 =w copy 2069187283 + storew %.5178, %.5177 + %.5180 =l add %.5179, 0 + %.5181 =w copy 1 + storew %.5181, %.5180 + %.5183 =l add %.5182, 0 + %.5184 =l extsw 0 + %.5185 =l sub %.5184, 5 + %.5186 =w copy %.5185 + storew %.5186, %.5183 + %.5188 =l add %.5187, 0 + %.5189 =w copy 255 + storeb %.5189, %.5188 + %.5191 =l add %.5190, 0 + storel $g_518, %.5191 + %.5194 =l add %.5193, 0 + %.5195 =l copy 3 + storel %.5195, %.5194 + %.5197 =l add %.5196, 0 + %.5198 =w copy 23235 + storeh %.5198, %.5197 + %.5200 =l add %.5199, 0 + %.5201 =l copy 18446744073709551611 + storel %.5201, %.5200 + storew 0, %.5203 +@for_cond.882 + %.5204 =w loadsw %.5203 + %.5205 =w csltw %.5204, 2 + jnz %.5205, @for_body.883, @for_join.885 +@for_body.883 + %.5206 =w copy 4192325373 + %.5207 =w loadsw %.5203 + %.5208 =l extsw %.5207 + %.5209 =l mul %.5208, 4 + %.5210 =l add %.5192, %.5209 + storew %.5206, %.5210 +@for_cont.884 + %.5211 =w loadsw %.5203 + %.5212 =w add %.5211, 1 + storew %.5212, %.5203 + jmp @for_cond.882 +@for_join.885 + storew 0, %.5203 +@for_cond.886 + %.5213 =w loadsw %.5203 + %.5214 =w csltw %.5213, 5 + jnz %.5214, @for_body.887, @for_join.889 +@for_body.887 + %.5215 =w copy 1346007472 + %.5216 =w loadsw %.5203 + %.5217 =l extsw %.5216 + %.5218 =l mul %.5217, 4 + %.5219 =l add %.5202, %.5218 + storew %.5215, %.5219 +@for_cont.888 + %.5220 =w loadsw %.5203 + %.5221 =w add %.5220, 1 + storew %.5221, %.5203 + jmp @for_cond.886 +@for_join.889 + %.5222 =l extsw 0 + storel %.5222, $g_82 +@for_cond.890 + %.5223 =l loadl $g_82 + %.5224 =l extsw 0 + %.5225 =w csgel %.5223, %.5224 + jnz 
%.5225, @for_body.891, @for_join.893 +@for_body.891 + %.5227 =l add %.5226, 0 + %.5228 =l copy $g_265 + %.5229 =l mul 0, 1 + %.5230 =l add %.5228, %.5229 + %.5231 =l copy %.5230 + storel %.5231, %.5227 + %.5233 =l add %.5232, 0 + %.5234 =l extsw 0 + %.5235 =l mul %.5234, 1 + %.5236 =l add $g_132, %.5235 + storel %.5236, %.5233 + %.5237 =l add %.5232, 8 + %.5238 =l extsw 0 + %.5239 =l mul %.5238, 1 + %.5240 =l add $g_132, %.5239 + storel %.5240, %.5237 + %.5241 =l add %.5232, 16 + %.5242 =l extsw 0 + %.5243 =l mul %.5242, 1 + %.5244 =l add $g_132, %.5243 + storel %.5244, %.5241 + %.5245 =l add %.5232, 24 + %.5246 =l extsw 0 + %.5247 =l mul %.5246, 1 + %.5248 =l add $g_132, %.5247 + storel %.5248, %.5245 + %.5249 =l add %.5232, 32 + %.5250 =l extsw 0 + %.5251 =l mul %.5250, 1 + %.5252 =l add $g_132, %.5251 + storel %.5252, %.5249 + %.5253 =l add %.5232, 40 + %.5254 =l extsw 0 + %.5255 =l mul %.5254, 1 + %.5256 =l add $g_132, %.5255 + storel %.5256, %.5253 + %.5257 =l add %.5232, 48 + %.5258 =l extsw 0 + %.5259 =l mul %.5258, 1 + %.5260 =l add $g_132, %.5259 + storel %.5260, %.5257 + %.5261 =l add %.5232, 56 + %.5262 =l extsw 0 + %.5263 =l mul %.5262, 1 + %.5264 =l add $g_132, %.5263 + storel %.5264, %.5261 + %.5266 =l add %.5265, 0 + %.5267 =w copy 255 + storeb %.5267, %.5266 + %.5268 =l add %.5265, 1 + storeb 0, %.5268 + %.5269 =l add %.5265, 2 + storeh 0, %.5269 + %.5270 =l add %.5265, 4 + storew 0, %.5270 + %.5271 =l add %.5265, 8 + %.5272 =l copy 3 + storel %.5272, %.5271 + %.5273 =l add %.5265, 16 + %.5274 =l extsw 0 + %.5275 =l sub %.5274, 3 + %.5276 =w copy %.5275 + storew %.5276, %.5273 + %.5277 =l add %.5265, 20 + storew 0, %.5277 + %.5278 =l add %.5265, 24 + %.5279 =l copy 18446744073709551615 + storel %.5279, %.5278 + %.5280 =l add %.5265, 32 + %.5281 =w copy 4294967286 + storew %.5281, %.5280 + %.5282 =l add %.5265, 36 + %.5283 =w copy 0 + storew %.5283, %.5282 + %.5284 =l add %.5265, 40 + %.5285 =l extsw 0 + %.5286 =l sub %.5285, 1 + %.5287 =w copy 
%.5286 + storew %.5287, %.5284 + %.5288 =l add %.5265, 44 + %.5289 =l extsw 0 + %.5290 =l sub %.5289, 2 + %.5291 =w copy %.5290 + storew %.5291, %.5288 + %.5292 =l add %.5265, 48 + %.5293 =w copy 0 + storew %.5293, %.5292 + %.5294 =l add %.5265, 52 + storew 0, %.5294 + %.5296 =l add %.5295, 0 + storel $g_425, %.5296 + %.5298 =l add %.5297, 0 + %.5299 =l extsw 0 + %.5300 =l copy %.5299 + storel %.5300, %.5298 + %.5302 =l add %.5301, 0 + %.5303 =l copy $g_185 + %.5304 =l mul 32, 1 + %.5305 =l add %.5303, %.5304 + %.5306 =l copy %.5305 + storel %.5306, %.5302 + %.5308 =l add %.5307, 0 + %.5309 =l copy %.5265 + %.5310 =l mul 44, 1 + %.5311 =l add %.5309, %.5310 + %.5312 =l copy %.5311 + storel %.5312, %.5308 + %.5314 =l add %.5313, 0 + %.5315 =l copy $g_1183 + %.5316 =l mul 40, 1 + %.5317 =l add %.5315, %.5316 + %.5318 =l copy %.5317 + storel %.5318, %.5314 + %.5320 =l add %.5319, 0 + %.5321 =w copy 1123578037 + storew %.5321, %.5320 + %.5323 =l add %.5322, 0 + %.5324 =w copy 222 + storeb %.5324, %.5323 + %.5326 =l add %.5325, 0 + %.5327 =w copy 852478378 + storew %.5327, %.5326 + %.5329 =l loadl $g_23 + %.5330 =w loadsw %.5329 + %.5331 =w loaduw %.85 + %.5332 =l extuw %.5331 + %.5333 =l extsw 0 + %.5334 =l sub %.5333, 4 + %.5335 =w cnel %.5334, 0 + jnz %.5335, @logic_join.895, @logic_right.894 +@logic_right.894 + %.5336 =w loadsw %.5179 + %.5337 =w copy %.5336 + %.5338 =w loaduw %.5176 + %.5339 =w copy 6 + %.5340 =l loadl %.5226 + storeb %.5339, %.5340 + %.5341 =w copy 1 + %.5342 =w call $safe_lshift_func_uint8_t_u_u(w %.5339, w %.5341) + %.5343 =l copy $g_130 + %.5344 =l mul 16, 1 + %.5345 =l add %.5343, %.5344 + %.5346 =l copy %.5345 + %.5347 =w loaduw %.5346 + %.5348 =w xor %.5337, %.5347 + %.5349 =w copy %.5348 + storew %.5349, %.5179 + %.5350 =w cnew %.5349, 0 +@logic_join.895 + %.5351 =w phi @for_body.891 %.5335, @logic_right.894 %.5350 + %.5352 =l extsw %.5351 + %.5353 =w loadsw %.5182 + %.5354 =l loadl $g_1269 + %.5355 =l loadl %.5354 + %.5356 =l loadl %.5355 
+ %.5357 =w cnel $g_1706, $g_1706 + %.5358 =w copy %.5357 + %.5359 =w call $safe_unary_minus_func_int16_t_s(w %.5358) + %.5360 =w extsh %.5359 + %.5361 =w loaduw %.5176 + %.5362 =w cultw %.5360, %.5361 + %.5363 =w cnew %.5362, 0 + jnz %.5363, @logic_join.897, @logic_right.896 +@logic_right.896 + %.5364 =l copy %.5265 + %.5365 =l mul 44, 1 + %.5366 =l add %.5364, %.5365 + %.5367 =l copy %.5366 + %.5368 =w loadsw %.5367 + %.5369 =w cnew %.5368, 0 +@logic_join.897 + %.5370 =w phi @logic_join.895 %.5363, @logic_right.896 %.5369 + %.5371 =w loadsw %.5182 + %.5372 =w copy %.5371 + %.5373 =l loadl $g_1590 + storeh %.5372, %.5373 + %.5374 =l loadl %.5295 + storeh %.5372, %.5374 + %.5375 =l extuh %.5372 + %.5376 =l loadl $g_1972 + %.5377 =l and %.5375, %.5376 + %.5378 =w loaduw %.5176 + %.5379 =l extuw %.5378 + %.5380 =l and %.5377, %.5379 + %.5381 =l copy %.5380 + %.5382 =l copy 0 + %.5383 =w cugel %.5381, %.5382 + %.5384 =w copy %.5383 + %.5385 =w loaduw %.5176 + %.5386 =w cultw %.5384, %.5385 + %.5387 =w or %.5353, %.5386 + %.5388 =w ceql %.5352, 65529 + %.5389 =w loadsh $g_81 + %.5390 =l copy 18446744073709551615 + %.5391 =l call $safe_div_func_int64_t_s_s(l %.5332, l %.5390) + %.5392 =w copy %.5391 + %.5393 =l copy $g_1183 + %.5394 =l mul 24, 1 + %.5395 =l add %.5393, %.5394 + %.5396 =l copy %.5395 + %.5397 =l loadl %.5396 + %.5398 =w copy %.5397 + %.5399 =w call $safe_rshift_func_int8_t_s_s(w %.5392, w %.5398) + %.5400 =w extsb %.5399 + %.5401 =w or %.5330, %.5400 + storew %.5401, %.5329 + %.5402 =l copy %.5265 + %.5403 =l mul 48, 1 + %.5404 =l add %.5402, %.5403 + %.5405 =l copy %.5404 + storew 0, %.5405 +@for_cond.898 + %.5406 =l copy %.5265 + %.5407 =l mul 48, 1 + %.5408 =l add %.5406, %.5407 + %.5409 =l copy %.5408 + %.5410 =w loadsw %.5409 + %.5411 =w cslew %.5410, 0 + jnz %.5411, @for_body.899, @for_join.901 +@for_body.899 + %.5413 =l add %.5412, 0 + %.5414 =w copy 1469999110 + storew %.5414, %.5413 + %.5416 =l add %.5415, 0 + %.5417 =l extsw 0 + %.5418 =l copy 
%.5417 + storel %.5418, %.5416 + %.5420 =l add %.5419, 0 + %.5421 =l extsw 0 + %.5422 =l copy %.5421 + storel %.5422, %.5420 + %.5424 =l add %.5423, 0 + %.5425 =l extsw 0 + %.5426 =l copy %.5425 + storel %.5426, %.5424 + %.5427 =l add %.5423, 8 + %.5428 =l extsw 9 + %.5429 =l mul %.5428, 4 + %.5430 =l add %.15, %.5429 + storel %.5430, %.5427 + %.5431 =l add %.5423, 16 + %.5432 =l extsw 9 + %.5433 =l mul %.5432, 4 + %.5434 =l add %.15, %.5433 + storel %.5434, %.5431 + %.5435 =l add %.5423, 24 + storel %.5179, %.5435 + %.5436 =l add %.5423, 32 + %.5437 =l extsw 9 + %.5438 =l mul %.5437, 4 + %.5439 =l add %.15, %.5438 + storel %.5439, %.5436 + %.5440 =l add %.5423, 40 + %.5441 =l extsw 9 + %.5442 =l mul %.5441, 4 + %.5443 =l add %.15, %.5442 + storel %.5443, %.5440 + %.5444 =l add %.5423, 48 + %.5445 =l extsw 0 + %.5446 =l copy %.5445 + storel %.5446, %.5444 + %.5447 =l add %.5423, 56 + %.5448 =l copy $g_185 + %.5449 =l mul 40, 1 + %.5450 =l add %.5448, %.5449 + %.5451 =l copy %.5450 + storel %.5451, %.5447 + %.5452 =l add %.5423, 64 + %.5453 =l extsw 9 + %.5454 =l mul %.5453, 4 + %.5455 =l add %.15, %.5454 + storel %.5455, %.5452 + %.5456 =l add %.5423, 72 + %.5457 =l extsw 9 + %.5458 =l mul %.5457, 4 + %.5459 =l add %.15, %.5458 + storel %.5459, %.5456 + %.5460 =l add %.5423, 80 + %.5461 =l extsw 0 + %.5462 =l copy %.5461 + storel %.5462, %.5460 + %.5463 =l add %.5423, 88 + %.5464 =l copy $g_185 + %.5465 =l mul 40, 1 + %.5466 =l add %.5464, %.5465 + %.5467 =l copy %.5466 + storel %.5467, %.5463 + %.5468 =l add %.5423, 96 + %.5469 =l extsw 0 + %.5470 =l copy %.5469 + storel %.5470, %.5468 + %.5471 =l add %.5423, 104 + %.5472 =l extsw 0 + %.5473 =l copy %.5472 + storel %.5473, %.5471 + %.5474 =l add %.5423, 112 + %.5475 =l copy $g_185 + %.5476 =l mul 40, 1 + %.5477 =l add %.5475, %.5476 + %.5478 =l copy %.5477 + storel %.5478, %.5474 + %.5479 =l add %.5423, 120 + %.5480 =l extsw 0 + %.5481 =l copy %.5480 + storel %.5481, %.5479 + %.5482 =l add %.5423, 128 + storel 
$g_50, %.5482 + %.5483 =l add %.5423, 136 + storel $g_50, %.5483 + %.5484 =l add %.5423, 144 + %.5485 =l extsw 9 + %.5486 =l mul %.5485, 4 + %.5487 =l add %.15, %.5486 + storel %.5487, %.5484 + %.5488 =l add %.5423, 152 + %.5489 =l extsw 0 + %.5490 =l copy %.5489 + storel %.5490, %.5488 + %.5491 =l add %.5423, 160 + %.5492 =l extsw 0 + %.5493 =l copy %.5492 + storel %.5493, %.5491 + %.5494 =l add %.5423, 168 + storel %.5179, %.5494 + %.5495 =l add %.5423, 176 + %.5496 =l extsw 7 + %.5497 =l mul %.5496, 12 + %.5498 =l add $g_13, %.5497 + %.5499 =l extsw 0 + %.5500 =l mul %.5499, 4 + %.5501 =l add %.5498, %.5500 + %.5502 =l extsw 0 + %.5503 =l mul %.5502, 4 + %.5504 =l add %.5501, %.5503 + storel %.5504, %.5495 + %.5505 =l add %.5423, 184 + storel %.5179, %.5505 + %.5506 =l add %.5423, 192 + %.5507 =l extsw 9 + %.5508 =l mul %.5507, 4 + %.5509 =l add %.15, %.5508 + storel %.5509, %.5506 + %.5510 =l add %.5423, 200 + %.5511 =l extsw 0 + %.5512 =l copy %.5511 + storel %.5512, %.5510 + %.5513 =l add %.5423, 208 + %.5514 =l extsw 0 + %.5515 =l copy %.5514 + storel %.5515, %.5513 + %.5516 =l add %.5423, 216 + %.5517 =l extsw 0 + %.5518 =l copy %.5517 + storel %.5518, %.5516 + %.5519 =l add %.5423, 224 + %.5520 =l extsw 9 + %.5521 =l mul %.5520, 4 + %.5522 =l add %.15, %.5521 + storel %.5522, %.5519 + %.5523 =l add %.5423, 232 + storel $g_50, %.5523 + %.5524 =l add %.5423, 240 + storel $g_50, %.5524 + %.5525 =l add %.5423, 248 + storel %.5179, %.5525 + %.5526 =l add %.5423, 256 + %.5527 =l extsw 0 + %.5528 =l copy %.5527 + storel %.5528, %.5526 + %.5529 =l add %.5423, 264 + %.5530 =l extsw 0 + %.5531 =l copy %.5530 + storel %.5531, %.5529 + %.5532 =l add %.5423, 272 + %.5533 =l extsw 7 + %.5534 =l mul %.5533, 12 + %.5535 =l add $g_13, %.5534 + %.5536 =l extsw 0 + %.5537 =l mul %.5536, 4 + %.5538 =l add %.5535, %.5537 + %.5539 =l extsw 0 + %.5540 =l mul %.5539, 4 + %.5541 =l add %.5538, %.5540 + storel %.5541, %.5532 + %.5542 =l add %.5423, 280 + %.5543 =l extsw 0 + %.5544 
=l copy %.5543 + storel %.5544, %.5542 + %.5545 =l add %.5423, 288 + %.5546 =l extsw 0 + %.5547 =l copy %.5546 + storel %.5547, %.5545 + %.5548 =l add %.5423, 296 + %.5549 =l extsw 7 + %.5550 =l mul %.5549, 12 + %.5551 =l add $g_13, %.5550 + %.5552 =l extsw 0 + %.5553 =l mul %.5552, 4 + %.5554 =l add %.5551, %.5553 + %.5555 =l extsw 0 + %.5556 =l mul %.5555, 4 + %.5557 =l add %.5554, %.5556 + storel %.5557, %.5548 + %.5558 =l add %.5423, 304 + %.5559 =l extsw 0 + %.5560 =l copy %.5559 + storel %.5560, %.5558 + %.5561 =l add %.5423, 312 + %.5562 =l extsw 0 + %.5563 =l copy %.5562 + storel %.5563, %.5561 + %.5564 =l add %.5423, 320 + %.5565 =l extsw 9 + %.5566 =l mul %.5565, 4 + %.5567 =l add %.15, %.5566 + storel %.5567, %.5564 + %.5568 =l add %.5423, 328 + storel $g_50, %.5568 + %.5569 =l add %.5423, 336 + %.5570 =l extsw 7 + %.5571 =l mul %.5570, 12 + %.5572 =l add $g_13, %.5571 + %.5573 =l extsw 0 + %.5574 =l mul %.5573, 4 + %.5575 =l add %.5572, %.5574 + %.5576 =l extsw 0 + %.5577 =l mul %.5576, 4 + %.5578 =l add %.5575, %.5577 + storel %.5578, %.5569 + %.5579 =l add %.5423, 344 + %.5580 =l copy $g_185 + %.5581 =l mul 40, 1 + %.5582 =l add %.5580, %.5581 + %.5583 =l copy %.5582 + storel %.5583, %.5579 + %.5584 =l add %.5423, 352 + storel $g_50, %.5584 + %.5585 =l add %.5423, 360 + %.5586 =l extsw 9 + %.5587 =l mul %.5586, 4 + %.5588 =l add %.15, %.5587 + storel %.5588, %.5585 + %.5589 =l add %.5423, 368 + storel $g_50, %.5589 + %.5590 =l add %.5423, 376 + %.5591 =l copy $g_185 + %.5592 =l mul 40, 1 + %.5593 =l add %.5591, %.5592 + %.5594 =l copy %.5593 + storel %.5594, %.5590 + %.5595 =l add %.5423, 384 + %.5596 =l extsw 0 + %.5597 =l copy %.5596 + storel %.5597, %.5595 + %.5598 =l add %.5423, 392 + %.5599 =l copy $g_130 + %.5600 =l mul 0, 1 + %.5601 =l add %.5599, %.5600 + %.5602 =l copy %.5601 + storel %.5602, %.5598 + %.5603 =l add %.5423, 400 + %.5604 =l extsw 0 + %.5605 =l copy %.5604 + storel %.5605, %.5603 + %.5606 =l add %.5423, 408 + storel %.5179, 
%.5606 + %.5607 =l add %.5423, 416 + %.5608 =l copy $g_185 + %.5609 =l mul 40, 1 + %.5610 =l add %.5608, %.5609 + %.5611 =l copy %.5610 + storel %.5611, %.5607 + %.5612 =l add %.5423, 424 + %.5613 =l extsw 9 + %.5614 =l mul %.5613, 4 + %.5615 =l add %.15, %.5614 + storel %.5615, %.5612 + %.5616 =l add %.5423, 432 + %.5617 =l extsw 7 + %.5618 =l mul %.5617, 12 + %.5619 =l add $g_13, %.5618 + %.5620 =l extsw 0 + %.5621 =l mul %.5620, 4 + %.5622 =l add %.5619, %.5621 + %.5623 =l extsw 0 + %.5624 =l mul %.5623, 4 + %.5625 =l add %.5622, %.5624 + storel %.5625, %.5616 + %.5626 =l add %.5423, 440 + %.5627 =l extsw 7 + %.5628 =l mul %.5627, 12 + %.5629 =l add $g_13, %.5628 + %.5630 =l extsw 0 + %.5631 =l mul %.5630, 4 + %.5632 =l add %.5629, %.5631 + %.5633 =l extsw 0 + %.5634 =l mul %.5633, 4 + %.5635 =l add %.5632, %.5634 + storel %.5635, %.5626 + %.5636 =l add %.5423, 448 + %.5637 =l extsw 7 + %.5638 =l mul %.5637, 12 + %.5639 =l add $g_13, %.5638 + %.5640 =l extsw 0 + %.5641 =l mul %.5640, 4 + %.5642 =l add %.5639, %.5641 + %.5643 =l extsw 0 + %.5644 =l mul %.5643, 4 + %.5645 =l add %.5642, %.5644 + storel %.5645, %.5636 + %.5646 =l add %.5423, 456 + storel $g_50, %.5646 + %.5647 =l add %.5423, 464 + %.5648 =l extsw 9 + %.5649 =l mul %.5648, 4 + %.5650 =l add %.15, %.5649 + storel %.5650, %.5647 + %.5651 =l add %.5423, 472 + %.5652 =l extsw 9 + %.5653 =l mul %.5652, 4 + %.5654 =l add %.15, %.5653 + storel %.5654, %.5651 + %.5655 =l add %.5423, 480 + storel $g_50, %.5655 + %.5656 =l add %.5423, 488 + %.5657 =l extsw 7 + %.5658 =l mul %.5657, 12 + %.5659 =l add $g_13, %.5658 + %.5660 =l extsw 0 + %.5661 =l mul %.5660, 4 + %.5662 =l add %.5659, %.5661 + %.5663 =l extsw 0 + %.5664 =l mul %.5663, 4 + %.5665 =l add %.5662, %.5664 + storel %.5665, %.5656 + %.5666 =l add %.5423, 496 + %.5667 =l copy $g_185 + %.5668 =l mul 40, 1 + %.5669 =l add %.5667, %.5668 + %.5670 =l copy %.5669 + storel %.5670, %.5666 + %.5671 =l add %.5423, 504 + storel $g_50, %.5671 + %.5672 =l add 
%.5423, 512 + %.5673 =l extsw 7 + %.5674 =l mul %.5673, 12 + %.5675 =l add $g_13, %.5674 + %.5676 =l extsw 0 + %.5677 =l mul %.5676, 4 + %.5678 =l add %.5675, %.5677 + %.5679 =l extsw 0 + %.5680 =l mul %.5679, 4 + %.5681 =l add %.5678, %.5680 + storel %.5681, %.5672 + %.5682 =l add %.5423, 520 + %.5683 =l extsw 0 + %.5684 =l copy %.5683 + storel %.5684, %.5682 + %.5685 =l add %.5423, 528 + %.5686 =l extsw 0 + %.5687 =l copy %.5686 + storel %.5687, %.5685 + %.5688 =l add %.5423, 536 + storel $g_50, %.5688 + %.5689 =l add %.5423, 544 + %.5690 =l copy $g_185 + %.5691 =l mul 40, 1 + %.5692 =l add %.5690, %.5691 + %.5693 =l copy %.5692 + storel %.5693, %.5689 + %.5694 =l add %.5423, 552 + storel $g_50, %.5694 + %.5695 =l add %.5423, 560 + %.5696 =l extsw 0 + %.5697 =l copy %.5696 + storel %.5697, %.5695 + %.5698 =l add %.5423, 568 + %.5699 =l extsw 0 + %.5700 =l copy %.5699 + storel %.5700, %.5698 + %.5701 =l add %.5423, 576 + %.5702 =l extsw 0 + %.5703 =l copy %.5702 + storel %.5703, %.5701 + %.5704 =l add %.5423, 584 + %.5705 =l extsw 0 + %.5706 =l copy %.5705 + storel %.5706, %.5704 + %.5707 =l add %.5423, 592 + %.5708 =l extsw 9 + %.5709 =l mul %.5708, 4 + %.5710 =l add %.15, %.5709 + storel %.5710, %.5707 + %.5711 =l add %.5423, 600 + storel $g_50, %.5711 + %.5712 =l add %.5423, 608 + storel $g_50, %.5712 + %.5713 =l add %.5423, 616 + storel %.5179, %.5713 + %.5714 =l add %.5423, 624 + storel %.5179, %.5714 + %.5715 =l add %.5423, 632 + storel $g_50, %.5715 + %.5716 =l add %.5423, 640 + %.5717 =l extsw 9 + %.5718 =l mul %.5717, 4 + %.5719 =l add %.15, %.5718 + storel %.5719, %.5716 + %.5720 =l add %.5423, 648 + storel $g_50, %.5720 + %.5721 =l add %.5423, 656 + storel $g_50, %.5721 + %.5722 =l add %.5423, 664 + %.5723 =l extsw 9 + %.5724 =l mul %.5723, 4 + %.5725 =l add %.15, %.5724 + storel %.5725, %.5722 + %.5726 =l add %.5423, 672 + %.5727 =l extsw 0 + %.5728 =l copy %.5727 + storel %.5728, %.5726 + %.5729 =l add %.5423, 680 + %.5730 =l extsw 0 + %.5731 =l copy 
%.5730 + storel %.5731, %.5729 + %.5732 =l add %.5423, 688 + storel %.5179, %.5732 + %.5733 =l add %.5423, 696 + %.5734 =l extsw 7 + %.5735 =l mul %.5734, 12 + %.5736 =l add $g_13, %.5735 + %.5737 =l extsw 0 + %.5738 =l mul %.5737, 4 + %.5739 =l add %.5736, %.5738 + %.5740 =l extsw 0 + %.5741 =l mul %.5740, 4 + %.5742 =l add %.5739, %.5741 + storel %.5742, %.5733 + %.5743 =l add %.5423, 704 + %.5744 =l extsw 0 + %.5745 =l copy %.5744 + storel %.5745, %.5743 + %.5746 =l add %.5423, 712 + %.5747 =l extsw 9 + %.5748 =l mul %.5747, 4 + %.5749 =l add %.15, %.5748 + storel %.5749, %.5746 + %.5750 =l add %.5423, 720 + %.5751 =l extsw 9 + %.5752 =l mul %.5751, 4 + %.5753 =l add %.15, %.5752 + storel %.5753, %.5750 + %.5754 =l add %.5423, 728 + storel %.5179, %.5754 + %.5755 =l add %.5423, 736 + %.5756 =l extsw 9 + %.5757 =l mul %.5756, 4 + %.5758 =l add %.15, %.5757 + storel %.5758, %.5755 + %.5759 =l add %.5423, 744 + %.5760 =l extsw 9 + %.5761 =l mul %.5760, 4 + %.5762 =l add %.15, %.5761 + storel %.5762, %.5759 + %.5763 =l add %.5423, 752 + %.5764 =l extsw 0 + %.5765 =l copy %.5764 + storel %.5765, %.5763 + %.5766 =l add %.5423, 760 + %.5767 =l copy $g_185 + %.5768 =l mul 40, 1 + %.5769 =l add %.5767, %.5768 + %.5770 =l copy %.5769 + storel %.5770, %.5766 + %.5771 =l add %.5423, 768 + %.5772 =l extsw 9 + %.5773 =l mul %.5772, 4 + %.5774 =l add %.15, %.5773 + storel %.5774, %.5771 + %.5775 =l add %.5423, 776 + %.5776 =l extsw 9 + %.5777 =l mul %.5776, 4 + %.5778 =l add %.15, %.5777 + storel %.5778, %.5775 + %.5779 =l add %.5423, 784 + %.5780 =l extsw 0 + %.5781 =l copy %.5780 + storel %.5781, %.5779 + %.5782 =l add %.5423, 792 + %.5783 =l copy $g_185 + %.5784 =l mul 40, 1 + %.5785 =l add %.5783, %.5784 + %.5786 =l copy %.5785 + storel %.5786, %.5782 + %.5787 =l add %.5423, 800 + %.5788 =l extsw 0 + %.5789 =l copy %.5788 + storel %.5789, %.5787 + %.5790 =l add %.5423, 808 + %.5791 =l extsw 0 + %.5792 =l copy %.5791 + storel %.5792, %.5790 + %.5793 =l add %.5423, 816 + 
%.5794 =l copy $g_185 + %.5795 =l mul 40, 1 + %.5796 =l add %.5794, %.5795 + %.5797 =l copy %.5796 + storel %.5797, %.5793 + %.5798 =l add %.5423, 824 + %.5799 =l extsw 0 + %.5800 =l copy %.5799 + storel %.5800, %.5798 + %.5801 =l add %.5423, 832 + storel $g_50, %.5801 + %.5802 =l add %.5423, 840 + storel $g_50, %.5802 + %.5803 =l add %.5423, 848 + %.5804 =l extsw 9 + %.5805 =l mul %.5804, 4 + %.5806 =l add %.15, %.5805 + storel %.5806, %.5803 + %.5807 =l add %.5423, 856 + %.5808 =l extsw 0 + %.5809 =l copy %.5808 + storel %.5809, %.5807 + %.5810 =l add %.5423, 864 + %.5811 =l extsw 0 + %.5812 =l copy %.5811 + storel %.5812, %.5810 + %.5813 =l add %.5423, 872 + storel %.5179, %.5813 + %.5814 =l add %.5423, 880 + %.5815 =l extsw 7 + %.5816 =l mul %.5815, 12 + %.5817 =l add $g_13, %.5816 + %.5818 =l extsw 0 + %.5819 =l mul %.5818, 4 + %.5820 =l add %.5817, %.5819 + %.5821 =l extsw 0 + %.5822 =l mul %.5821, 4 + %.5823 =l add %.5820, %.5822 + storel %.5823, %.5814 + %.5824 =l add %.5423, 888 + storel %.5179, %.5824 + %.5825 =l add %.5423, 896 + %.5826 =l extsw 9 + %.5827 =l mul %.5826, 4 + %.5828 =l add %.15, %.5827 + storel %.5828, %.5825 + %.5829 =l add %.5423, 904 + %.5830 =l extsw 0 + %.5831 =l copy %.5830 + storel %.5831, %.5829 + %.5832 =l add %.5423, 912 + %.5833 =l extsw 0 + %.5834 =l copy %.5833 + storel %.5834, %.5832 + %.5835 =l add %.5423, 920 + %.5836 =l extsw 0 + %.5837 =l copy %.5836 + storel %.5837, %.5835 + %.5838 =l add %.5423, 928 + %.5839 =l extsw 9 + %.5840 =l mul %.5839, 4 + %.5841 =l add %.15, %.5840 + storel %.5841, %.5838 + %.5842 =l add %.5423, 936 + storel $g_50, %.5842 + %.5843 =l add %.5423, 944 + storel $g_50, %.5843 + %.5844 =l add %.5423, 952 + storel %.5179, %.5844 + storew 0, %.5846 +@for_cond.902 + %.5849 =w loadsw %.5846 + %.5850 =w csltw %.5849, 4 + jnz %.5850, @for_body.903, @for_join.905 +@for_body.903 + %.5851 =w copy 857976620 + %.5852 =w loadsw %.5846 + %.5853 =l extsw %.5852 + %.5854 =l mul %.5853, 4 + %.5855 =l add %.5845, 
%.5854 + storew %.5851, %.5855 +@for_cont.904 + %.5856 =w loadsw %.5846 + %.5857 =w add %.5856, 1 + storew %.5857, %.5846 + jmp @for_cond.902 +@for_join.905 + %.5858 =w copy 0 + %.5859 =l copy $g_794 + %.5860 =l mul 4, 1 + %.5861 =l add %.5859, %.5860 + %.5862 =l copy %.5861 + storew %.5858, %.5862 +@for_cond.906 + %.5863 =l copy $g_794 + %.5864 =l mul 4, 1 + %.5865 =l add %.5863, %.5864 + %.5866 =l copy %.5865 + %.5867 =w loaduw %.5866 + %.5868 =w copy 1 + %.5869 =w culew %.5867, %.5868 + jnz %.5869, @for_body.907, @for_join.909 +@for_body.907 + %.5870 =l copy %.5265 + %.5871 =l mul 36, 1 + %.5872 =l add %.5870, %.5871 + %.5873 =l copy %.5872 + %.5874 =w loaduw %.5873 + %.5875 =w cnew %.5874, 0 + jnz %.5875, @if_true.910, @if_false.911 +@if_true.910 + jmp @for_join.909 +@if_false.911 +@for_cont.908 + %.5876 =l copy $g_794 + %.5877 =l mul 4, 1 + %.5878 =l add %.5876, %.5877 + %.5879 =l copy %.5878 + %.5880 =w loaduw %.5879 + %.5881 =w copy 1 + %.5882 =w add %.5880, %.5881 + storew %.5882, %.5879 + jmp @for_cond.906 +@for_join.909 + %.5883 =l extsw 3 + %.5884 =l mul %.5883, 4 + %.5885 =l add %.5845, %.5884 + %.5886 =w loaduw %.5885 + %.5887 =w sub %.5886, 1 + storew %.5887, %.5885 + %.5888 =l loadl %.1 + %.5889 =w loadsw %.5888 + %.5890 =l loadl $g_23 + %.5891 =w loadsw %.5890 + %.5892 =w or %.5889, %.5891 + storew %.5892, %.5888 +@for_cont.900 + %.5893 =l copy %.5265 + %.5894 =l mul 48, 1 + %.5895 =l add %.5893, %.5894 + %.5896 =l copy %.5895 + %.5897 =w loadsw %.5896 + %.5898 =w add %.5897, 1 + storew %.5898, %.5896 + jmp @for_cond.898 +@for_join.901 + %.5899 =l loadl $g_296 + %.5900 =l loadl %.5899 + %.5901 =l copy %.5265 + %.5902 =l mul 32, 1 + %.5903 =l add %.5901, %.5902 + %.5904 =l copy %.5903 + %.5905 =w loaduw %.5904 + %.5906 =l extuw %.5905 + %.5907 =w csltl 49, %.5906 + %.5908 =w copy %.5907 + %.5909 =l copy %.5265 + %.5910 =l mul 48, 1 + %.5911 =l add %.5909, %.5910 + %.5912 =l copy %.5911 + %.5913 =w loadsw %.5912 + %.5914 =l loadl %.5297 + %.5915 =l 
extsw 0 + %.5916 =l mul %.5915, 40 + %.5917 =l add $g_1615, %.5916 + %.5918 =l extsw 1 + %.5919 =l mul %.5918, 8 + %.5920 =l add %.5917, %.5919 + storel %.5914, %.5920 + %.5921 =l extsw 0 + %.5922 =w cnel %.5914, %.5921 + %.5923 =w copy %.5922 + %.5924 =w call $safe_mul_func_int8_t_s_s(w %.5908, w %.5923) + %.5925 =w extsb %.5924 + storel %.64, $g_1752 + %.5926 =l loadl %.1 + %.5927 =w loadsw %.5926 + %.5928 =l extsw %.5927 + %.5929 =l copy $g_1183 + %.5930 =l mul 8, 1 + %.5931 =l add %.5929, %.5930 + %.5932 =l copy %.5931 + storel %.5928, %.5932 + %.5933 =l loadl $g_1983 + %.5934 =w ceql %.64, %.5933 + %.5935 =w xor %.5925, %.5934 + %.5936 =l loadl %.1 + storew %.5935, %.5936 + %.5937 =w cnew %.5935, 0 + jnz %.5937, @if_true.912, @if_false.913 +@if_true.912 + %.5939 =l add %.5938, 0 + %.5940 =w copy 40695 + storeh %.5940, %.5939 + %.5942 =l add %.5941, 0 + %.5943 =w copy 1497767668 + storew %.5943, %.5942 + %.5945 =l add %.5944, 0 + %.5946 =w copy 136 + storeb %.5946, %.5945 + %.5948 =l add %.5947, 0 + storel $g_296, %.5948 + %.5950 =l add %.5949, 0 + %.5951 =w copy 3083152646 + storew %.5951, %.5950 + %.5953 =l add %.5952, 0 + %.5954 =w copy 3152178012 + storew %.5954, %.5953 + %.5956 =l add %.5955, 0 + %.5957 =w copy 5 + storew %.5957, %.5956 + %.5958 =l add %.5955, 4 + %.5959 =w copy 505745575 + storew %.5959, %.5958 + %.5960 =l add %.5955, 8 + %.5961 =w copy 4707 + storeh %.5961, %.5960 + %.5962 =l add %.5955, 10 + storeh 0, %.5962 + %.5963 =l add %.5955, 12 + %.5964 =w copy 0 + storew %.5964, %.5963 + %.5965 =l add %.5955, 16 + %.5966 =w copy 1 + storew %.5966, %.5965 + %.5967 =w copy 0 + %.5968 =l copy $g_518 + %.5969 =l mul 0, 1 + %.5970 =l add %.5968, %.5969 + %.5971 =l copy %.5970 + storeb %.5967, %.5971 +@for_cond.914 + %.5972 =l copy $g_518 + %.5973 =l mul 0, 1 + %.5974 =l add %.5972, %.5973 + %.5975 =l copy %.5974 + %.5976 =w loadub %.5975 + %.5977 =w extub %.5976 + %.5978 =w cslew %.5977, 0 + jnz %.5978, @for_body.915, @for_join.917 +@for_body.915 + 
%.5980 =l add %.5979, 0 + %.5981 =w copy 18446744073709551612 + storew %.5981, %.5980 + %.5983 =l add %.5982, 0 + %.5984 =w copy 3939614397 + storew %.5984, %.5983 + %.5986 =l add %.5985, 0 + %.5987 =l extsw 0 + %.5988 =l copy %.5987 + storel %.5988, %.5986 + %.5989 =l add %.5985, 8 + %.5990 =l extsw 0 + %.5991 =l copy %.5990 + storel %.5991, %.5989 + %.5992 =l add %.5985, 16 + %.5993 =l extsw 0 + %.5994 =l copy %.5993 + storel %.5994, %.5992 + %.5995 =l add %.5985, 24 + %.5996 =l extsw 0 + %.5997 =l copy %.5996 + storel %.5997, %.5995 + %.5998 =l add %.5985, 32 + %.5999 =l extsw 0 + %.6000 =l copy %.5999 + storel %.6000, %.5998 + %.6001 =l add %.5985, 40 + %.6002 =l extsw 0 + %.6003 =l copy %.6002 + storel %.6003, %.6001 + %.6004 =l add %.5985, 48 + %.6005 =l extsw 0 + %.6006 =l copy %.6005 + storel %.6006, %.6004 + %.6007 =l add %.5985, 56 + %.6008 =l extsw 0 + %.6009 =l copy %.6008 + storel %.6009, %.6007 + %.6011 =l add %.6010, 0 + %.6012 =l extsw 1 + %.6013 =l mul %.6012, 2 + %.6014 =l add %.51, %.6013 + storel %.6014, %.6011 + %.6016 =w loadub %.5187 + %.6017 =w extub %.6016 + %.6018 =w cnew %.6017, 0 + jnz %.6018, @logic_right.918, @logic_join.919 +@logic_right.918 + %.6019 =l copy %.5265 + %.6020 =l mul 40, 1 + %.6021 =l add %.6019, %.6020 + %.6022 =l copy %.6021 + %.6023 =w loadsw %.6022 + %.6024 =w copy %.6023 + %.6025 =w copy 0 + %.6026 =l loadl $g_1590 + storeh %.6025, %.6026 + %.6027 =w extuh %.6025 + %.6028 =w cnew %.6027, 0 + jnz %.6028, @logic_join.925, @logic_right.924 +@logic_right.924 + %.6029 =l loadl %.5226 + %.6030 =w loadub %.6029 + %.6031 =w sub %.6030, 1 + storeb %.6031, %.6029 + %.6032 =w loaduw %.5979 + %.6033 =w copy %.6032 + %.6034 =w copy 13288484106753422136 + storew %.6034, %.5982 + %.6035 =l copy %.5265 + %.6036 =l mul 16, 1 + %.6037 =l add %.6035, %.6036 + %.6038 =l copy %.6037 + %.6039 =w loadsw %.6038 + %.6040 =w copy %.6039 + %.6041 =w loaduw %.5979 + %.6042 =w loadsw %.5941 + %.6043 =l extsw %.6042 + %.6044 =w loadsw %.5179 + 
%.6045 =l loadl $g_1590 + %.6046 =w loaduh %.6045 + %.6047 =l loadl %.83 + %.6048 =w loaduw %.5979 + %.6049 =l extuw %.6048 + %.6050 =l or 61837, %.6049 + %.6051 =w copy %.6050 + %.6052 =l loadl $g_422 + %.6053 =w loaduw %.6052 + %.6054 =w copy %.6053 + %.6055 =w call $safe_div_func_int32_t_s_s(w %.6051, w %.6054) + %.6056 =l loadl %.83 + %.6057 =w cnel %.6047, %.6056 + %.6058 =w loaduh %.5938 + %.6059 =w extuh %.6058 + %.6060 =w cnew %.6059, 0 + jnz %.6060, @logic_join.929, @logic_right.928 +@logic_right.928 + %.6061 =w cnel 7, 0 +@logic_join.929 + %.6062 =w phi @logic_right.924 %.6060, @logic_right.928 %.6061 + storew %.6062, %.5182 + %.6063 =l loadl $g_1590 + %.6064 =w loaduh %.6063 + %.6065 =w extuh %.6064 + %.6066 =w call $safe_lshift_func_uint16_t_u_u(w %.6046, w %.6065) + %.6067 =w extuh %.6066 + %.6068 =w cnew %.6067, 0 + jnz %.6068, @logic_join.927, @logic_right.926 +@logic_right.926 + %.6069 =w loaduw %.5979 + %.6070 =w cnew %.6069, 0 +@logic_join.927 + %.6071 =w phi @logic_join.929 %.6068, @logic_right.926 %.6070 + %.6072 =w copy %.6071 + %.6073 =l loadl $g_1038 + %.6074 =l loadl %.6073 + %.6075 =w loaduw %.6074 + %.6076 =w cugtw %.6072, %.6075 + %.6077 =l extsw %.6076 + %.6078 =w csgel %.6077, 12088 + %.6079 =w and %.6044, %.6078 + storew %.6079, %.5179 + %.6080 =l extsw %.6079 + %.6081 =l call $safe_add_func_int64_t_s_s(l %.6080, l 4276973671381511902) + %.6082 =l loadl %.1 + %.6083 =w loadsw %.6082 + %.6084 =l extsw %.6083 + %.6085 =l or %.6081, %.6084 + %.6086 =l copy 33187 + %.6087 =l and %.6085, %.6086 + %.6088 =l and %.6043, %.6087 + %.6089 =w copy %.6088 + storew %.6089, %.5941 + %.6090 =w copy %.6089 + %.6091 =w and %.6041, %.6090 + %.6092 =w loaduw %.5176 + %.6093 =w or %.6040, %.6092 + %.6094 =w copy %.6093 + storew %.6094, %.6038 + %.6095 =w xor %.6034, %.6094 + %.6096 =w copy %.6095 + %.6097 =w call $safe_mod_func_int16_t_s_s(w %.6033, w %.6096) + %.6098 =w copy %.6097 + %.6099 =w call $safe_add_func_uint8_t_u_u(w %.6031, w %.6098) + %.6100 
=w extub %.6099 + %.6101 =w cnew %.6100, 0 +@logic_join.925 + %.6102 =w phi @logic_right.918 %.6028, @logic_join.927 %.6101 + %.6103 =w cnew %.6102, 0 + jnz %.6103, @logic_join.923, @logic_right.922 +@logic_right.922 + %.6104 =w loaduh %.5938 + %.6105 =w extuh %.6104 + %.6106 =w cnew %.6105, 0 +@logic_join.923 + %.6107 =w phi @logic_join.925 %.6103, @logic_right.922 %.6106 + %.6108 =w copy %.6107 + %.6109 =w loaduh %.5938 + %.6110 =w extuh %.6109 + %.6111 =w call $safe_sub_func_uint32_t_u_u(w %.6108, w %.6110) + %.6112 =l loadl %.1 + %.6113 =w loadsw %.6112 + %.6114 =w copy %.6113 + %.6115 =w culew %.6111, %.6114 + %.6116 =w copy %.6115 + %.6117 =w copy 221 + %.6118 =w call $safe_mul_func_int8_t_s_s(w %.6116, w %.6117) + %.6119 =w extsb %.6118 + %.6120 =w loaduw %.5979 + %.6121 =w xor %.6119, %.6120 + %.6122 =w copy %.6121 + %.6123 =w loadub %.5187 + %.6124 =w extub %.6123 + %.6125 =w call $safe_lshift_func_int8_t_s_u(w %.6122, w %.6124) + %.6126 =w extsb %.6125 + %.6127 =w call $safe_div_func_int16_t_s_s(w %.6024, w %.6126) + %.6128 =w extsh %.6127 + %.6129 =w loaduh %.5938 + %.6130 =w extuh %.6129 + %.6131 =w csltw %.6128, %.6130 + %.6132 =l loadl $g_1038 + %.6133 =l loadl %.6132 + %.6134 =w loaduw %.6133 + %.6135 =w loaduw $g_2013 + %.6136 =w or %.6134, %.6135 + %.6137 =w cnew %.6136, 0 + jnz %.6137, @logic_join.921, @logic_right.920 +@logic_right.920 + %.6138 =w loadsw %.5982 + %.6139 =w cnew %.6138, 0 +@logic_join.921 + %.6140 =w phi @logic_join.923 %.6137, @logic_right.920 %.6139 + %.6141 =l copy %.5265 + %.6142 =l mul 24, 1 + %.6143 =l add %.6141, %.6142 + %.6144 =l copy %.6143 + %.6145 =l loadl %.6144 + %.6146 =w copy %.6145 + %.6147 =w call $safe_mod_func_int32_t_s_s(w %.6140, w %.6146) + %.6148 =w loadsb %.5944 + %.6149 =w extsb %.6148 + %.6150 =w and %.6147, %.6149 + %.6151 =l loadl $g_173 + %.6152 =w loadsw %.6151 + %.6153 =l extsw %.6152 + %.6154 =w culel %.6153, 4294967292 + %.6155 =w cnew %.6154, 0 +@logic_join.919 + %.6156 =w phi @for_body.915 
%.6018, @logic_join.921 %.6155 + %.6157 =l loadl $g_173 + storew %.6156, %.6157 + %.6158 =l loadl $g_1983 + %.6159 =l loadl %.6158 + %.6160 =l loadl %.6159 + %.6161 =l loadl %.6160 + %.6162 =l loadl %.6161 + %.6163 =w loadsw %.5941 + %.6164 =l extsw 0 + %.6165 =l sub %.6164, 9 + %.6166 =w copy %.6165 + %.6167 =w call $safe_lshift_func_int8_t_s_s(w %.6166, w 4) + %.6168 =w extsb %.6167 + %.6169 =w cnew %.6168, 0 + jnz %.6169, @logic_join.933, @logic_right.932 +@logic_right.932 + %.6170 =l loadl %.5301 + %.6171 =w copy 46190 + %.6172 =w call $safe_rshift_func_int16_t_s_s(w %.6171, w 4) + %.6173 =w extsh %.6172 + %.6174 =l extsw 0 + %.6175 =l loadl $g_1705 + %.6176 =l loadl %.6175 + %.6177 =w ceql %.6174, %.6176 + %.6178 =w xor %.6173, %.6177 + %.6179 =w copy %.6178 + %.6180 =l loadl $g_1313 + %.6181 =l loadl %.6180 + %.6182 =l loadl %.6181 + %.6183 =l loadl %.6182 + storew %.6179, %.6183 + %.6184 =l copy %.5265 + %.6185 =l mul 32, 1 + %.6186 =l add %.6184, %.6185 + %.6187 =l copy %.6186 + %.6188 =w loaduw %.6187 + %.6189 =w loadub %.5187 + %.6190 =w extub %.6189 + %.6191 =w or %.6188, %.6190 + %.6192 =w call $safe_mod_func_uint32_t_u_u(w %.6179, w %.6191) + %.6193 =w cnel 194, 0 + jnz %.6193, @logic_join.935, @logic_right.934 +@logic_right.934 + %.6194 =w cnel 30, 0 +@logic_join.935 + %.6195 =w phi @logic_right.932 %.6193, @logic_right.934 %.6194 + %.6196 =w copy %.6195 + %.6197 =l loadl %.6010 + storeh %.6196, %.6197 + %.6198 =l extsw 0 + %.6199 =w cnel %.6170, %.6198 + %.6200 =l copy %.5265 + %.6201 =l mul 40, 1 + %.6202 =l add %.6200, %.6201 + %.6203 =l copy %.6202 + %.6204 =w loadsw %.6203 + %.6205 =w csgtw %.6199, %.6204 + %.6206 =w cnew %.6205, 0 +@logic_join.933 + %.6207 =w phi @logic_join.919 %.6169, @logic_join.935 %.6206 + %.6208 =l extsw %.6207 + %.6209 =w loaduw %.5176 + %.6210 =l extuw %.6209 + %.6211 =l call $safe_div_func_uint64_t_u_u(l %.6208, l %.6210) + %.6212 =l copy 5 + %.6213 =w cugtl %.6211, %.6212 + %.6214 =w cnew %.6213, 0 + jnz %.6214, 
@logic_join.931, @logic_right.930 +@logic_right.930 + %.6215 =l loadl $g_2028 + %.6216 =w cnel %.6215, 0 +@logic_join.931 + %.6217 =w phi @logic_join.933 %.6214, @logic_right.930 %.6216 + %.6218 =w or %.6163, %.6217 + %.6219 =l extsw %.6218 + %.6220 =l copy $g_1183 + %.6221 =l mul 8, 1 + %.6222 =l add %.6220, %.6221 + %.6223 =l copy %.6222 + storel %.6219, %.6223 + %.6224 =w copy %.6219 + storew %.6224, %.5941 + %.6225 =l loadl %.5190 + %.6226 =w ceql %.6162, %.6225 + %.6227 =w loadsw %.5982 + %.6228 =w csgtw %.6226, %.6227 + %.6229 =w cnew %.6228, 0 + jnz %.6229, @if_true.936, @if_false.937 +@if_true.936 + %.6230 =w loadsw %.5941 + %.6231 =l loadl %.5295 + %.6232 =w loaduh %.6231 + %.6233 =l extuh %.6232 + %.6234 =l xor 7, 1973726022 + %.6235 =l or %.6233, %.6234 + %.6236 =w copy %.6235 + storeh %.6236, %.6231 + %.6237 =w call $safe_lshift_func_uint16_t_u_s(w %.6236, w 10) + %.6238 =w extuh %.6237 + %.6239 =l loadl $g_173 + storew %.6238, %.6239 + %.6240 =w or %.6230, %.6238 + storew %.6240, %.5941 + jmp @if_join.938 +@if_false.937 + %.6241 =l loadl %.5307 + storel %.6241, %.5313 + %.6242 =l loadl %.5313 + %.6243 =w loadsw %.6242 + %.6244 =w copy %.6243 + %.6245 =w copy 9 + %.6246 =l loadl $g_1038 + %.6247 =l loadl %.6246 + storew %.6245, %.6247 + %.6248 =w loaduh %.5938 + %.6249 =w extuh %.6248 + %.6250 =w xor %.6245, %.6249 + %.6251 =w xor %.6244, %.6250 + %.6252 =w copy %.6251 + storew %.6252, %.6242 + %.6253 =w loadsw %.5182 + %.6254 =l extsw %.6253 + ret %.6254 +@if_join.938 + storel %.5182, %.5313 +@for_cont.916 + %.6255 =l copy $g_518 + %.6256 =l mul 0, 1 + %.6257 =l add %.6255, %.6256 + %.6258 =l copy %.6257 + %.6259 =w loadub %.6258 + %.6260 =w extub %.6259 + %.6261 =w add %.6260, 1 + %.6262 =w copy %.6261 + storeb %.6262, %.6258 + jmp @for_cond.914 +@for_join.917 + %.6263 =w copy 0 + storeb %.6263, $g_937 +@for_cond.939 + %.6264 =w loadsb $g_937 + %.6265 =w extsb %.6264 + %.6266 =w cslew %.6265, 0 + jnz %.6266, @for_body.940, @for_join.942 +@for_body.940 
+ %.6268 =l add %.6267, 0 + %.6269 =w copy 0 + storew %.6269, %.6268 + %.6270 =l add %.6267, 4 + %.6271 =w copy 18446744073709551615 + storew %.6271, %.6270 + %.6272 =l add %.6267, 8 + %.6273 =w copy 2804 + storeh %.6273, %.6272 + %.6274 =l add %.6267, 10 + storeh 0, %.6274 + %.6275 =l add %.6267, 12 + %.6276 =w copy 1575345085 + storew %.6276, %.6275 + %.6277 =l add %.6267, 16 + %.6278 =w copy 8 + storew %.6278, %.6277 + %.6280 =l add %.6279, 0 + %.6281 =l copy %.5265 + %.6282 =l mul 44, 1 + %.6283 =l add %.6281, %.6282 + %.6284 =l copy %.6283 + storel %.6284, %.6280 + %.6286 =l add %.6285, 0 + %.6287 =w copy 2173313342 + storew %.6287, %.6286 + %.6288 =l loadl $g_23 + %.6289 =w loadsw %.6288 + %.6290 =l extsw 4 + %.6291 =l mul %.6290, 1 + %.6292 =l add $g_132, %.6291 + %.6293 =w loadsb %.6292 + %.6294 =w extsb %.6293 + %.6295 =w copy 65534 + %.6296 =w loaduh %.5938 + %.6297 =l extuh %.6296 + %.6298 =l copy $g_1183 + %.6299 =l mul 32, 1 + %.6300 =l add %.6298, %.6299 + %.6301 =l copy %.6300 + %.6302 =w loaduw %.6301 + %.6303 =w copy %.6302 + %.6304 =w call $safe_lshift_func_int8_t_s_s(w %.6303, w 5) + %.6305 =w extsb %.6304 + %.6306 =w loadsb $g_1130 + %.6307 =w extsb %.6306 + %.6308 =w loaduw %.5319 + %.6309 =w copy %.6308 + %.6310 =w loadsw %.5179 + %.6311 =w copy %.6310 + %.6312 =w copy 30 + %.6313 =w call $safe_sub_func_uint8_t_u_u(w %.6311, w %.6312) + %.6314 =l copy $g_265 + %.6315 =l mul 36, 1 + %.6316 =l add %.6314, %.6315 + %.6317 =l copy %.6316 + %.6318 =w loaduw %.6317 + %.6319 =w copy %.6318 + %.6320 =w call $safe_lshift_func_uint8_t_u_s(w %.6313, w %.6319) + %.6321 =w extub %.6320 + %.6322 =w loadsw %.5941 + %.6323 =w cnew %.6322, 0 + jnz %.6323, @logic_right.947, @logic_join.948 +@logic_right.947 + %.6324 =w cnel 17356307903983304843, 0 +@logic_join.948 + %.6325 =w phi @for_body.940 %.6323, @logic_right.947 %.6324 + %.6326 =l copy %.6267 + %.6327 =l mul 8, 1 + %.6328 =l add %.6326, %.6327 + %.6329 =l copy %.6328 + %.6330 =w loadsh %.6329 + %.6331 =w 
extsh %.6330 + %.6332 =w cslew %.6321, %.6331 + %.6333 =w cnew %.6332, 0 + jnz %.6333, @logic_join.946, @logic_right.945 +@logic_right.945 + %.6334 =l copy %.6267 + %.6335 =l mul 16, 1 + %.6336 =l add %.6334, %.6335 + %.6337 =l copy %.6336 + %.6338 =w loaduw %.6337 + %.6339 =w cnew %.6338, 0 +@logic_join.946 + %.6340 =w phi @logic_join.948 %.6333, @logic_right.945 %.6339 + %.6341 =l extsw %.6340 + %.6342 =l copy $g_185 + %.6343 =l mul 8, 1 + %.6344 =l add %.6342, %.6343 + %.6345 =l copy %.6344 + %.6346 =l loadl %.6345 + %.6347 =l xor %.6341, %.6346 + %.6348 =w copy %.6347 + %.6349 =w call $safe_lshift_func_int16_t_s_u(w %.6309, w %.6348) + %.6350 =w loadsw %.5179 + %.6351 =w call $safe_rshift_func_int16_t_s_s(w %.6349, w %.6350) + %.6352 =l extsh %.6351 + %.6353 =l xor %.6352, 0 + %.6354 =l loadl $g_173 + %.6355 =w loadsw %.6354 + %.6356 =l extsw %.6355 + %.6357 =w ceql %.6353, %.6356 + %.6358 =w loadub %.5187 + %.6359 =w extub %.6358 + %.6360 =w csltw %.6357, %.6359 + %.6361 =l loadl $g_1313 + %.6362 =l loadl %.6361 + %.6363 =l loadl %.6362 + %.6364 =l loadl %.6363 + %.6365 =w loaduw %.6364 + %.6366 =w xor %.6365, 18446744073709551615 + %.6367 =w and %.6307, %.6366 + %.6368 =w copy %.6367 + storeb %.6368, $g_1130 + %.6369 =l loadl $g_422 + %.6370 =w loaduw %.6369 + %.6371 =w copy %.6370 + %.6372 =w call $safe_mod_func_int32_t_s_s(w %.6305, w %.6371) + %.6373 =l extsw %.6372 + %.6374 =l copy $g_518 + %.6375 =l mul 8, 1 + %.6376 =l add %.6374, %.6375 + %.6377 =l copy %.6376 + %.6378 =l loadl %.6377 + %.6379 =l copy %.6378 + %.6380 =l call $safe_sub_func_uint64_t_u_u(l %.6373, l %.6379) + %.6381 =w cugel %.6297, %.6380 + %.6382 =w copy %.6381 + %.6383 =w call $safe_mul_func_uint16_t_u_u(w %.6295, w %.6382) + %.6384 =w extuh %.6383 + %.6385 =l extsw 0 + %.6386 =l mul %.6385, 4 + %.6387 =l add %.5192, %.6386 + %.6388 =w loadsw %.6387 + %.6389 =w ceqw %.6384, %.6388 + %.6390 =w xor %.6294, %.6389 + %.6391 =w copy %.6390 + storeb %.6391, %.6292 + %.6392 =l loadl $g_2028 
+ %.6393 =w copy %.6392 + %.6394 =w call $safe_mod_func_int8_t_s_s(w %.6391, w %.6393) + %.6395 =w extsb %.6394 + %.6396 =w cnew %.6395, 0 + jnz %.6396, @logic_right.943, @logic_join.944 +@logic_right.943 + %.6397 =l loadl %.5307 + %.6398 =w loadsw %.6397 + %.6399 =w cnew %.6398, 0 +@logic_join.944 + %.6400 =w phi @logic_join.946 %.6396, @logic_right.943 %.6399 + %.6401 =l copy %.6267 + %.6402 =l mul 12, 1 + %.6403 =l add %.6401, %.6402 + %.6404 =l copy %.6403 + %.6405 =w loadsw %.6404 + %.6406 =w and %.6289, %.6405 + storew %.6406, %.6288 + %.6407 =l loadl %.6279 + storel %.6407, %.6279 + %.6408 =w copy 0 + storew %.6408, %.61 +@for_cond.949 + %.6409 =w loaduw %.61 + %.6410 =w copy 0 + %.6411 =w culew %.6409, %.6410 + jnz %.6411, @for_body.950, @for_join.952 +@for_body.950 + %.6413 =l add %.6412, 0 + %.6414 =l extsw 0 + %.6415 =l copy %.6414 + storel %.6415, %.6413 + %.6417 =l add %.6416, 0 + %.6418 =l copy %.6267 + %.6419 =l mul 8, 1 + %.6420 =l add %.6418, %.6419 + %.6421 =l copy %.6420 + storel %.6421, %.6417 + %.6423 =l add %.6422, 0 + %.6424 =l copy $g_185 + %.6425 =l mul 24, 1 + %.6426 =l add %.6424, %.6425 + %.6427 =l copy %.6426 + storel %.6427, %.6423 + %.6429 =l loadl $g_82 + %.6430 =l copy %.6429 + %.6431 =l mul %.6430, 4 + %.6432 =l add %.15, %.6431 + %.6433 =w loadsw %.6432 + %.6434 =w copy %.6433 + %.6435 =l loadl $g_82 + %.6436 =l copy %.6435 + %.6437 =l mul %.6436, 4 + %.6438 =l add %.15, %.6437 + %.6439 =w loadsw %.6438 + %.6440 =w copy %.6439 + %.6441 =w call $safe_rshift_func_uint8_t_u_u(w %.6434, w %.6440) + %.6442 =w extub %.6441 + %.6443 =w cnew %.6442, 0 + jnz %.6443, @logic_join.956, @logic_right.955 +@logic_right.955 + %.6444 =l loadl $g_82 + %.6445 =l copy %.6444 + %.6446 =l mul %.6445, 4 + %.6447 =l add %.15, %.6446 + %.6448 =w loadsw %.6447 + %.6449 =l extsw %.6448 + %.6450 =w copy 2112370878 + %.6451 =l loadl $g_1037 + %.6452 =l loadl %.6451 + %.6453 =l loadl %.6452 + storew %.6450, %.6453 + %.6454 =l extuw %.6450 + %.6455 =l xor 
%.6454, 3 + %.6456 =l xor %.6449, %.6455 + %.6457 =l loadl $g_1984 + %.6458 =l loadl %.6457 + storel %.6458, %.5947 + %.6459 =l extsw 0 + %.6460 =w ceql %.6458, %.6459 + %.6461 =w cnew %.6460, 0 + jnz %.6461, @logic_right.957, @logic_join.958 +@logic_right.957 + %.6462 =l extsw 1 + %.6463 =l mul %.6462, 1 + %.6464 =l add $g_132, %.6463 + %.6465 =w loadsb %.6464 + %.6466 =l loadl %.5313 + %.6467 =w loadsw %.6466 + %.6468 =w copy 3409771330 + %.6469 =w call $safe_add_func_int32_t_s_s(w %.6467, w %.6468) + %.6470 =w copy %.6469 + %.6471 =l loadl %.5193 + %.6472 =w copy %.6471 + %.6473 =w call $safe_mul_func_uint16_t_u_u(w %.6470, w %.6472) + %.6474 =w extuh %.6473 + %.6475 =w cnew %.6474, 0 +@logic_join.958 + %.6476 =w phi @logic_right.955 %.6461, @logic_right.957 %.6475 + %.6477 =l extsw %.6476 + %.6478 =w csgtl %.6456, %.6477 + %.6479 =w copy %.6478 + %.6480 =w copy 1 + %.6481 =w call $safe_mod_func_uint16_t_u_u(w %.6479, w %.6480) + %.6482 =w copy %.6481 + %.6483 =w call $safe_unary_minus_func_int8_t_s(w %.6482) + %.6484 =l extsb %.6483 + %.6485 =l extsw 0 + %.6486 =l sub %.6485, 7 + %.6487 =l copy %.6486 + %.6488 =l call $safe_sub_func_int64_t_s_s(l %.6484, l %.6487) + %.6489 =w copy %.6488 + %.6490 =l loadl %.6416 + storeh %.6489, %.6490 + %.6491 =w extsh %.6489 + %.6492 =w cnew %.6491, 0 +@logic_join.956 + %.6493 =w phi @for_body.950 %.6443, @logic_join.958 %.6492 + %.6494 =l extsw %.6493 + %.6495 =w loadsb %.5944 + %.6496 =l extsb %.6495 + %.6497 =l call $safe_div_func_uint64_t_u_u(l %.6494, l %.6496) + %.6498 =w cnel %.6497, 0 + jnz %.6498, @logic_join.954, @logic_right.953 +@logic_right.953 + %.6499 =w loaduw %.5949 + %.6500 =w cnew %.6499, 0 +@logic_join.954 + %.6501 =w phi @logic_join.956 %.6498, @logic_right.953 %.6500 + %.6502 =w loadsb $g_937 + %.6503 =w extsb %.6502 + %.6504 =w add %.6503, 7 + %.6505 =l extsw %.6504 + %.6506 =l mul %.6505, 4 + %.6507 =l add %.15, %.6506 + storew %.6501, %.6507 + %.6508 =w copy 46382 + %.6509 =l loadl %.6422 + %.6510 =l 
loadl %.6509 + %.6511 =w loadsb $g_937 + %.6512 =w extsb %.6511 + %.6513 =w add %.6512, 7 + %.6514 =l extsw %.6513 + %.6515 =l mul %.6514, 4 + %.6516 =l add %.15, %.6515 + %.6517 =w loadsw %.6516 + %.6518 =w ceqw %.6517, 0 + %.6519 =l extsw %.6518 + %.6520 =l or %.6510, %.6519 + storel %.6520, %.6509 + %.6521 =w loadsw %.6285 + %.6522 =w loadsb %.5322 + %.6523 =w extsb %.6522 + %.6524 =l loadl $g_1590 + %.6525 =w loaduh %.6524 + %.6526 =w loadsb $g_937 + %.6527 =w extsb %.6526 + %.6528 =w add %.6527, 7 + %.6529 =l extsw %.6528 + %.6530 =l mul %.6529, 4 + %.6531 =l add %.15, %.6530 + %.6532 =w loadsw %.6531 + %.6533 =w copy %.6532 + %.6534 =l extsw 4 + %.6535 =l mul %.6534, 1 + %.6536 =l add $g_132, %.6535 + storeb %.6533, %.6536 + %.6537 =w extsb %.6533 + %.6538 =w ceqw %.6537, 0 + %.6539 =w loadsb $g_937 + %.6540 =w extsb %.6539 + %.6541 =w add %.6540, 7 + %.6542 =l extsw %.6541 + %.6543 =l mul %.6542, 4 + %.6544 =l add %.15, %.6543 + %.6545 =w loadsw %.6544 + %.6546 =w cslew %.6538, %.6545 + %.6547 =w copy %.6546 + %.6548 =w call $safe_add_func_uint16_t_u_u(w %.6525, w %.6547) + %.6549 =w extuh %.6548 + %.6550 =w and %.6523, %.6549 + %.6551 =l extsw %.6550 + %.6552 =l loadl $g_82 + %.6553 =l copy %.6552 + %.6554 =l mul %.6553, 4 + %.6555 =l add %.15, %.6554 + %.6556 =w loadsw %.6555 + %.6557 =l extsw %.6556 + %.6558 =l loadl %.5307 + %.6559 =w loadsw %.6558 + %.6560 =w loaduh $g_2102 + %.6561 =w extuh %.6560 + %.6562 =w csltw %.6559, %.6561 + %.6563 =w copy %.6562 + %.6564 =l loadl %.5307 + %.6565 =w loadsw %.6564 + %.6566 =w copy %.6565 + %.6567 =w call $safe_mul_func_int16_t_s_s(w %.6563, w %.6566) + %.6568 =w loadsb %.5944 + %.6569 =w extsb %.6568 + %.6570 =w call $safe_mul_func_int16_t_s_s(w %.6567, w %.6569) + %.6571 =l extsh %.6570 + %.6572 =w csgel %.6571, 3216404459 + %.6573 =l extsw %.6572 + %.6574 =l xor %.6573, 50202 + %.6575 =w copy %.6574 + %.6576 =l loadl $g_1313 + %.6577 =l loadl %.6576 + %.6578 =l loadl %.6577 + %.6579 =l loadl %.6578 + storew 
%.6575, %.6579 + %.6580 =l loadl $g_82 + %.6581 =l copy %.6580 + %.6582 =l mul %.6581, 4 + %.6583 =l add %.15, %.6582 + %.6584 =w loadsw %.6583 + %.6585 =w copy %.6584 + %.6586 =w or %.6575, %.6585 + %.6587 =l loadl %.5313 + %.6588 =w loadsw %.6587 + %.6589 =w copy %.6588 + %.6590 =w cugew %.6586, %.6589 + %.6591 =w loadsh %.5196 + %.6592 =l extsh %.6591 + %.6593 =w cnel %.6592, 0 + %.6594 =w copy %.6593 + %.6595 =l loadl $g_1590 + %.6596 =w loaduh %.6595 + %.6597 =w call $safe_add_func_uint16_t_u_u(w %.6594, w %.6596) + %.6598 =l extuh %.6597 + %.6599 =l or %.6598, 13760 + %.6600 =l copy %.6599 + %.6601 =l call $safe_div_func_int64_t_s_s(l %.6557, l %.6600) + %.6602 =l extsw 0 + %.6603 =w cnel $g_1070, %.6602 + %.6604 =l extsw %.6603 + %.6605 =l call $safe_sub_func_int64_t_s_s(l %.6551, l %.6604) + %.6606 =l copy 1 + %.6607 =w cslel %.6605, %.6606 + %.6608 =w copy %.6607 + %.6609 =w loaduw %.5176 + %.6610 =w or %.6608, %.6609 + %.6611 =w copy %.6610 + %.6612 =l loadl %.6279 + %.6613 =w loadsw %.6612 + %.6614 =w copy %.6613 + %.6615 =w call $safe_mod_func_uint16_t_u_u(w %.6611, w %.6614) + %.6616 =l extuh %.6615 + %.6617 =l extsw 0 + %.6618 =l sub %.6617, 1 + %.6619 =l and %.6616, %.6618 + %.6620 =l xor %.6619, 2235 + %.6621 =w copy %.6620 + %.6622 =l extsw 0 + %.6623 =l mul %.6622, 4 + %.6624 =l add %.5192, %.6623 + %.6625 =w loadsw %.6624 + %.6626 =w call $safe_rshift_func_int16_t_s_s(w %.6621, w %.6625) + %.6627 =w extsh %.6626 + %.6628 =w csgtw %.6521, %.6627 + %.6629 =l extsw %.6628 + %.6630 =w cultl %.6520, %.6629 + %.6631 =w cnew %.6630, 0 + jnz %.6631, @logic_join.960, @logic_right.959 +@logic_right.959 + %.6632 =l loadl %.5199 + %.6633 =w cnel %.6632, 0 +@logic_join.960 + %.6634 =w phi @logic_join.954 %.6631, @logic_right.959 %.6633 + %.6635 =w copy %.6634 + %.6636 =w loaduw %.5952 + %.6637 =w or %.6635, %.6636 + %.6638 =w copy %.6637 + %.6639 =l loadl %.5313 + %.6640 =w loadsw %.6639 + %.6641 =w copy %.6640 + %.6642 =w call $safe_mul_func_int16_t_s_s(w 
%.6638, w %.6641) + %.6643 =w copy %.6642 + %.6644 =w call $safe_add_func_uint16_t_u_u(w %.6508, w %.6643) + %.6645 =w extuh %.6644 + %.6646 =l loadl %.5307 + storew %.6645, %.6646 + %.6647 =l loadl %.5313 + %.6648 =w loadsw %.6647 + %.6649 =w cnew %.6648, 0 + jnz %.6649, @if_true.961, @if_false.962 +@if_true.961 + jmp @for_join.952 +@if_false.962 + %.6650 =w loadsb %.5944 + %.6651 =l extsb %.6650 + ret %.6651 +@for_cont.951 + %.6652 =w loaduw %.61 + %.6653 =w copy 1 + %.6654 =w add %.6652, %.6653 + storew %.6654, %.61 + jmp @for_cond.949 +@for_join.952 +@for_cont.941 + %.6655 =w loadsb $g_937 + %.6656 =w extsb %.6655 + %.6657 =w add %.6656, 1 + %.6658 =w copy %.6657 + storeb %.6658, $g_937 + jmp @for_cond.939 +@for_join.942 + %.6659 =w copy 0 + storeb %.6659, $g_566 +@for_cond.963 + %.6660 =w loadub $g_566 + %.6661 =w extub %.6660 + %.6662 =w cslew %.6661, 0 + jnz %.6662, @for_body.964, @for_join.966 +@for_body.964 + %.6664 =l add %.6663, 0 + %.6665 =w copy 8 + storew %.6665, %.6664 + %.6666 =l add %.6663, 4 + %.6667 =w copy 1 + storew %.6667, %.6666 + %.6668 =l add %.6663, 8 + %.6669 =l extsw 0 + %.6670 =l sub %.6669, 8 + %.6671 =w copy %.6670 + storeh %.6671, %.6668 + %.6672 =l add %.6663, 10 + storeh 0, %.6672 + %.6673 =l add %.6663, 12 + %.6674 =w copy 789134719 + storew %.6674, %.6673 + %.6675 =l add %.6663, 16 + %.6676 =w copy 0 + storew %.6676, %.6675 + %.6677 =l add %.6663, 20 + %.6678 =w copy 8 + storew %.6678, %.6677 + %.6679 =l add %.6663, 24 + %.6680 =w copy 1 + storew %.6680, %.6679 + %.6681 =l add %.6663, 28 + %.6682 =l extsw 0 + %.6683 =l sub %.6682, 8 + %.6684 =w copy %.6683 + storeh %.6684, %.6681 + %.6685 =l add %.6663, 30 + storeh 0, %.6685 + %.6686 =l add %.6663, 32 + %.6687 =w copy 789134719 + storew %.6687, %.6686 + %.6688 =l add %.6663, 36 + %.6689 =w copy 0 + storew %.6689, %.6688 + %.6690 =l add %.6663, 40 + %.6691 =w copy 8 + storew %.6691, %.6690 + %.6692 =l add %.6663, 44 + %.6693 =w copy 1 + storew %.6693, %.6692 + %.6694 =l add 
%.6663, 48 + %.6695 =l extsw 0 + %.6696 =l sub %.6695, 8 + %.6697 =w copy %.6696 + storeh %.6697, %.6694 + %.6698 =l add %.6663, 50 + storeh 0, %.6698 + %.6699 =l add %.6663, 52 + %.6700 =w copy 789134719 + storew %.6700, %.6699 + %.6701 =l add %.6663, 56 + %.6702 =w copy 0 + storew %.6702, %.6701 + %.6704 =l add %.6703, 0 + storel $g_24, %.6704 + %.6705 =l add %.6703, 8 + %.6706 =l copy $g_518 + %.6707 =l mul 16, 1 + %.6708 =l add %.6706, %.6707 + %.6709 =l copy %.6708 + storel %.6709, %.6705 + %.6710 =l add %.6703, 16 + storel $g_24, %.6710 + %.6711 =l add %.6703, 24 + %.6712 =l copy $g_518 + %.6713 =l mul 16, 1 + %.6714 =l add %.6712, %.6713 + %.6715 =l copy %.6714 + storel %.6715, %.6711 + %.6716 =l add %.6703, 32 + storel $g_24, %.6716 + %.6717 =l add %.6703, 40 + %.6718 =l copy $g_518 + %.6719 =l mul 16, 1 + %.6720 =l add %.6718, %.6719 + %.6721 =l copy %.6720 + storel %.6721, %.6717 + %.6722 =l add %.6703, 48 + storel $g_24, %.6722 + %.6723 =l add %.6703, 56 + %.6724 =l copy $g_518 + %.6725 =l mul 16, 1 + %.6726 =l add %.6724, %.6725 + %.6727 =l copy %.6726 + storel %.6727, %.6723 + %.6729 =l extsw 0 + %.6730 =l mul %.6729, 20 + %.6731 =l add %.6663, %.6730 + %.6732 =l loadl $g_1123 + %.6733 =l loaduw %.6731 + storew %.6733, %.6732 + %.6734 =l add %.6731, 4 + %.6735 =l add %.6732, 4 + %.6736 =l loaduw %.6734 + storew %.6736, %.6735 + %.6737 =l add %.6734, 4 + %.6738 =l add %.6735, 4 + %.6739 =l loaduw %.6737 + storew %.6739, %.6738 + %.6740 =l add %.6737, 4 + %.6741 =l add %.6738, 4 + %.6742 =l loaduw %.6740 + storew %.6742, %.6741 + %.6743 =l add %.6740, 4 + %.6744 =l add %.6741, 4 + %.6745 =l loaduw %.6743 + storew %.6745, %.6744 + %.6746 =l add %.6743, 4 + %.6747 =l add %.6744, 4 + %.6748 =l copy $g_185 + %.6749 =l mul 16, 1 + %.6750 =l add %.6748, %.6749 + %.6751 =l copy %.6750 + storew 0, %.6751 +@for_cond.967 + %.6752 =l copy $g_185 + %.6753 =l mul 16, 1 + %.6754 =l add %.6752, %.6753 + %.6755 =l copy %.6754 + %.6756 =w loadsw %.6755 + %.6757 =w csgew 
%.6756, 0 + jnz %.6757, @for_body.968, @for_join.970 +@for_body.968 + %.6759 =l add %.6758, 0 + %.6760 =l copy $g_518 + %.6761 =l mul 48, 1 + %.6762 =l add %.6760, %.6761 + %.6763 =l copy %.6762 + storel %.6763, %.6759 + %.6764 =l add %.6758, 8 + %.6765 =l copy $g_518 + %.6766 =l mul 48, 1 + %.6767 =l add %.6765, %.6766 + %.6768 =l copy %.6767 + storel %.6768, %.6764 + %.6769 =l add %.6758, 16 + %.6770 =l copy $g_518 + %.6771 =l mul 48, 1 + %.6772 =l add %.6770, %.6771 + %.6773 =l copy %.6772 + storel %.6773, %.6769 + %.6774 =l add %.6758, 24 + %.6775 =l copy $g_518 + %.6776 =l mul 48, 1 + %.6777 =l add %.6775, %.6776 + %.6778 =l copy %.6777 + storel %.6778, %.6774 + %.6779 =l add %.6758, 32 + %.6780 =l copy $g_518 + %.6781 =l mul 48, 1 + %.6782 =l add %.6780, %.6781 + %.6783 =l copy %.6782 + storel %.6783, %.6779 + %.6784 =l add %.6758, 40 + %.6785 =l copy $g_518 + %.6786 =l mul 48, 1 + %.6787 =l add %.6785, %.6786 + %.6788 =l copy %.6787 + storel %.6788, %.6784 + %.6789 =l add %.6758, 48 + %.6790 =l copy $g_518 + %.6791 =l mul 48, 1 + %.6792 =l add %.6790, %.6791 + %.6793 =l copy %.6792 + storel %.6793, %.6789 + %.6794 =l add %.6758, 56 + %.6795 =l copy $g_518 + %.6796 =l mul 48, 1 + %.6797 =l add %.6795, %.6796 + %.6798 =l copy %.6797 + storel %.6798, %.6794 + %.6799 =l add %.6758, 64 + %.6800 =l copy $g_518 + %.6801 =l mul 48, 1 + %.6802 =l add %.6800, %.6801 + %.6803 =l copy %.6802 + storel %.6803, %.6799 + %.6805 =l add %.6804, 0 + storel $g_23, %.6805 + %.6807 =l loadl $g_23 + %.6808 =w loadsw %.6807 + %.6809 =l extsw %.6808 + %.6810 =l and %.6809, 1 + %.6811 =w copy %.6810 + storew %.6811, %.6807 + %.6812 =l extsw 0 + %.6813 =l mul %.6812, 8 + %.6814 =l add %.6703, %.6813 + %.6815 =l loadl %.6814 + %.6816 =l call $func_4(l %.5941, l %.6815, l %.5941) + %.6817 =l loadl %.6804 + storel %.6816, %.6817 +@for_cont.969 + %.6818 =l copy $g_185 + %.6819 =l mul 16, 1 + %.6820 =l add %.6818, %.6819 + %.6821 =l copy %.6820 + %.6822 =w loadsw %.6821 + %.6823 =w sub 
%.6822, 1 + storew %.6823, %.6821 + jmp @for_cond.967 +@for_join.970 +@for_cont.965 + %.6824 =w loadub $g_566 + %.6825 =w extub %.6824 + %.6826 =w add %.6825, 1 + %.6827 =w copy %.6826 + storeb %.6827, $g_566 + jmp @for_cond.963 +@for_join.966 + %.6828 =l copy $g_265 + %.6829 =l mul 48, 1 + %.6830 =l add %.6828, %.6829 + %.6831 =l copy %.6830 + storew 0, %.6831 +@for_cond.971 + %.6832 =l copy $g_265 + %.6833 =l mul 48, 1 + %.6834 =l add %.6832, %.6833 + %.6835 =l copy %.6834 + %.6836 =w loadsw %.6835 + %.6837 =w cslew %.6836, 0 + jnz %.6837, @for_body.972, @for_join.974 +@for_body.972 + %.6838 =l copy %.5955 + %.6839 =l mul 12, 1 + %.6840 =l add %.6838, %.6839 + %.6841 =l copy %.6840 + %.6842 =w loadsw %.6841 + %.6843 =l extsw %.6842 + ret %.6843 +@for_cont.973 + %.6844 =l copy $g_265 + %.6845 =l mul 48, 1 + %.6846 =l add %.6844, %.6845 + %.6847 =l copy %.6846 + %.6848 =w loadsw %.6847 + %.6849 =w add %.6848, 1 + storew %.6849, %.6847 + jmp @for_cond.971 +@for_join.974 + jmp @if_join.975 +@if_false.913 + %.6851 =l add %.6850, 0 + %.6852 =w copy 1 + storeh %.6852, %.6851 + %.6854 =l add %.6853, 0 + storel $g_1972, %.6854 + %.6856 =l add %.6855, 0 + %.6857 =l copy %.88 + %.6858 =l mul 8, 1 + %.6859 =l add %.6857, %.6858 + %.6860 =l copy %.6859 + storel %.6860, %.6856 + %.6862 =l add %.6861, 0 + %.6863 =l extsw 0 + %.6864 =l sub %.6863, 1 + %.6865 =w copy %.6864 + storew %.6865, %.6862 + %.6867 =l add %.6866, 0 + %.6868 =l extsw 0 + %.6869 =l sub %.6868, 8 + %.6870 =w copy %.6869 + storew %.6870, %.6867 + %.6872 =l add %.6871, 0 + %.6873 =w copy 192325631 + storew %.6873, %.6872 + %.6874 =l add %.6871, 4 + %.6875 =w copy 192325631 + storew %.6875, %.6874 + %.6876 =l add %.6871, 8 + %.6877 =w copy 192325631 + storew %.6877, %.6876 + %.6878 =l add %.6871, 12 + %.6879 =w copy 192325631 + storew %.6879, %.6878 + %.6880 =l add %.6871, 16 + %.6881 =w copy 192325631 + storew %.6881, %.6880 + %.6882 =l add %.6871, 20 + %.6883 =w copy 192325631 + storew %.6883, %.6882 + %.6885 
=l copy $g_265 + %.6886 =l mul 24, 1 + %.6887 =l add %.6885, %.6886 + %.6888 =l copy %.6887 + %.6889 =l loadl %.6888 + %.6890 =w copy 26311 + %.6891 =w loadsh %.6850 + %.6892 =w extsh %.6891 + %.6893 =w call $safe_rshift_func_int16_t_s_s(w %.6890, w %.6892) + %.6894 =l extsh %.6893 + %.6895 =w copy 6 + %.6896 =w call $safe_unary_minus_func_uint8_t_u(w %.6895) + %.6897 =l loadl %.5307 + %.6898 =w loadsw %.6897 + storel $g_394, $g_2127 + %.6899 =w cnel $g_363, $g_394 + %.6900 =l loadl %.104 + %.6901 =l loadl %.5313 + %.6902 =w loadsw %.6901 + %.6903 =l extsw %.6902 + %.6904 =w csgel %.6903, 2 + %.6905 =w copy %.6904 + %.6906 =w loadsw %.5182 + %.6907 =w copy %.6906 + %.6908 =w call $safe_mod_func_uint8_t_u_u(w %.6905, w %.6907) + %.6909 =w copy %.6908 + %.6910 =l copy $g_794 + %.6911 =l mul 4, 1 + %.6912 =l add %.6910, %.6911 + %.6913 =l copy %.6912 + %.6914 =w loaduw %.6913 + %.6915 =w copy %.6914 + %.6916 =w call $safe_add_func_int8_t_s_s(w %.6909, w %.6915) + %.6917 =w extsb %.6916 + %.6918 =l extsw 0 + %.6919 =l mul %.6918, 4 + %.6920 =l add %.5192, %.6919 + %.6921 =w loadsw %.6920 + %.6922 =w call $safe_rshift_func_uint16_t_u_s(w %.6917, w %.6921) + %.6923 =w extuh %.6922 + %.6924 =l extsw 0 + %.6925 =l mul %.6924, 4 + %.6926 =l add %.5192, %.6925 + storew %.6923, %.6926 + storew %.6923, %.5182 + %.6927 =l extsw 0 + %.6928 =w cnel %.6900, %.6927 + %.6929 =l loadl %.1 + %.6930 =w loadsw %.6929 + %.6931 =w cnew %.6928, %.6930 + %.6932 =l extsw %.6931 + %.6933 =l loadl %.6853 + storel %.6932, %.6933 + %.6934 =l call $safe_add_func_int64_t_s_s(l %.6932, l 5017732426839066702) + %.6935 =w cnel %.6934, 0 + jnz %.6935, @logic_join.977, @logic_right.976 +@logic_right.976 + %.6936 =w loadsh %.6850 + %.6937 =w extsh %.6936 + %.6938 =w cnew %.6937, 0 +@logic_join.977 + %.6939 =w phi @if_false.913 %.6935, @logic_right.976 %.6938 + %.6940 =w xor %.6899, %.6939 + %.6941 =w copy %.6940 + %.6942 =w copy 7 + %.6943 =w call $safe_rshift_func_int8_t_s_u(w %.6941, w %.6942) + 
%.6944 =w extsb %.6943 + %.6945 =l loadl %.6855 + storeh %.6944, %.6945 + %.6946 =w copy 65535 + %.6947 =w call $safe_mod_func_int16_t_s_s(w %.6944, w %.6946) + %.6948 =l extsh %.6947 + %.6949 =l loadl %.5193 + %.6950 =w ceql %.6948, %.6949 + %.6951 =w copy %.6950 + %.6952 =l loadl $g_1313 + %.6953 =l loadl %.6952 + %.6954 =l loadl %.6953 + %.6955 =l loadl %.6954 + %.6956 =w loaduw %.6955 + %.6957 =w culew %.6951, %.6956 + %.6958 =w csltw %.6898, %.6957 + %.6959 =w call $safe_unary_minus_func_int32_t_s(w %.6958) + %.6960 =l extsw %.6959 + %.6961 =w csgel %.6960, 44916 + %.6962 =l extsw %.6961 + %.6963 =l call $safe_add_func_uint64_t_u_u(l %.6894, l %.6962) + %.6964 =w loadsh %.5196 + %.6965 =l extsh %.6964 + %.6966 =w culel %.6963, %.6965 + %.6967 =l loadl %.5307 + %.6968 =w loadsw %.6967 + %.6969 =w cnew %.6966, %.6968 + %.6970 =l extsw %.6969 + %.6971 =w ceql %.6970, 3 + %.6972 =w loadsh %.6850 + %.6973 =w extsh %.6972 + %.6974 =w or %.6971, %.6973 + %.6975 =w xor %.6974, 18446744073709551615 + %.6976 =w loadsh %.106 + %.6977 =w extsh %.6976 + %.6978 =w csgtw %.6975, %.6977 + %.6979 =l extsw 1 + %.6980 =l mul %.6979, 4 + %.6981 =l add %.5202, %.6980 + %.6982 =w loadsw %.6981 + %.6983 =w and %.6978, %.6982 + %.6984 =w copy %.6983 + %.6985 =l extsw 1 + %.6986 =l mul %.6985, 4 + %.6987 =l add %.5202, %.6986 + %.6988 =w loadsw %.6987 + %.6989 =w copy %.6988 + %.6990 =w call $safe_mod_func_uint8_t_u_u(w %.6984, w %.6989) + %.6991 =w extub %.6990 + %.6992 =l loadl $g_1038 + %.6993 =l loadl %.6992 + %.6994 =w loaduw %.6993 + %.6995 =w and %.6991, %.6994 + %.6996 =l extuw %.6995 + %.6997 =l and %.6996, 18446744073709551613 + %.6998 =w copy %.6997 + %.6999 =l loadl $g_422 + storew %.6998, %.6999 + %.7000 =w cnel 1, 0 + jnz %.7000, @if_true.978, @if_false.979 +@if_true.978 + %.7001 =l loadl %.1 + %.7002 =w loadsw %.7001 + %.7003 =l extsw %.7002 + ret %.7003 +@if_false.979 + %.7005 =l add %.7004, 0 + %.7006 =w copy 3605607459 + storew %.7006, %.7005 + %.7007 =l add %.7004, 
4 + %.7008 =w copy 7 + storew %.7008, %.7007 + %.7009 =l add %.7004, 8 + %.7010 =w copy 3827000415 + storew %.7010, %.7009 + %.7011 =l add %.7004, 12 + %.7012 =w copy 737796084 + storew %.7012, %.7011 + %.7013 =l add %.7004, 16 + %.7014 =w copy 2981231114 + storew %.7014, %.7013 + %.7015 =l add %.7004, 20 + %.7016 =w copy 18446744073709551615 + storew %.7016, %.7015 + %.7017 =l add %.7004, 24 + %.7018 =w copy 18446744073709551613 + storew %.7018, %.7017 + %.7019 =l add %.7004, 28 + %.7020 =w copy 1691684583 + storew %.7020, %.7019 + %.7021 =l add %.7004, 32 + %.7022 =w copy 1699922327 + storew %.7022, %.7021 + %.7023 =l add %.7004, 36 + %.7024 =w copy 644777404 + storew %.7024, %.7023 + %.7025 =l add %.7004, 40 + %.7026 =w copy 18446744073709551615 + storew %.7026, %.7025 + %.7027 =l add %.7004, 44 + %.7028 =w copy 7 + storew %.7028, %.7027 + %.7029 =l add %.7004, 48 + %.7030 =w copy 5 + storew %.7030, %.7029 + %.7031 =l add %.7004, 52 + %.7032 =w copy 3681951840 + storew %.7032, %.7031 + %.7033 =l add %.7004, 56 + %.7034 =w copy 2389908307 + storew %.7034, %.7033 + %.7035 =l add %.7004, 60 + %.7036 =w copy 3418309949 + storew %.7036, %.7035 + %.7037 =l add %.7004, 64 + %.7038 =w copy 1 + storew %.7038, %.7037 + %.7039 =l add %.7004, 68 + %.7040 =w copy 2 + storew %.7040, %.7039 + %.7041 =l add %.7004, 72 + %.7042 =w copy 2125129727 + storew %.7042, %.7041 + %.7043 =l add %.7004, 76 + %.7044 =w copy 0 + storew %.7044, %.7043 + %.7045 =l add %.7004, 80 + %.7046 =w copy 18446744073709551611 + storew %.7046, %.7045 + %.7047 =l add %.7004, 84 + %.7048 =w copy 18446744073709551606 + storew %.7048, %.7047 + %.7049 =l add %.7004, 88 + %.7050 =w copy 18446744073709551609 + storew %.7050, %.7049 + %.7051 =l add %.7004, 92 + %.7052 =w copy 1294648098 + storew %.7052, %.7051 + %.7053 =l add %.7004, 96 + %.7054 =w copy 927038418 + storew %.7054, %.7053 + %.7055 =l add %.7004, 100 + %.7056 =w copy 18446744073709551608 + storew %.7056, %.7055 + %.7057 =l add %.7004, 104 + %.7058 
=w copy 18446744073709551613 + storew %.7058, %.7057 + %.7059 =l add %.7004, 108 + %.7060 =w copy 4187825284 + storew %.7060, %.7059 + %.7061 =l add %.7004, 112 + %.7062 =w copy 5 + storew %.7062, %.7061 + %.7063 =l add %.7004, 116 + %.7064 =w copy 3681951840 + storew %.7064, %.7063 + %.7065 =l add %.7004, 120 + %.7066 =w copy 18446744073709551615 + storew %.7066, %.7065 + %.7067 =l add %.7004, 124 + %.7068 =w copy 1 + storew %.7068, %.7067 + %.7069 =l add %.7004, 128 + %.7070 =w copy 18446744073709551608 + storew %.7070, %.7069 + %.7071 =l add %.7004, 132 + %.7072 =w copy 459369300 + storew %.7072, %.7071 + %.7073 =l add %.7004, 136 + %.7074 =w copy 1827016989 + storew %.7074, %.7073 + %.7075 =l add %.7004, 140 + %.7076 =w copy 680997031 + storew %.7076, %.7075 + %.7077 =l add %.7004, 144 + %.7078 =w copy 1639435908 + storew %.7078, %.7077 + %.7079 =l add %.7004, 148 + %.7080 =w copy 3644077451 + storew %.7080, %.7079 + %.7081 =l add %.7004, 152 + %.7082 =w copy 1 + storew %.7082, %.7081 + %.7083 =l add %.7004, 156 + %.7084 =w copy 2125129727 + storew %.7084, %.7083 + %.7085 =l add %.7004, 160 + %.7086 =w copy 2569114947 + storew %.7086, %.7085 + %.7087 =l add %.7004, 164 + %.7088 =w copy 3605607459 + storew %.7088, %.7087 + %.7089 =l add %.7004, 168 + %.7090 =w copy 18446744073709551608 + storew %.7090, %.7089 + %.7091 =l add %.7004, 172 + %.7092 =w copy 3218957464 + storew %.7092, %.7091 + %.7093 =l add %.7004, 176 + %.7094 =w copy 1 + storew %.7094, %.7093 + %.7095 =l add %.7004, 180 + %.7096 =w copy 18446744073709551615 + storew %.7096, %.7095 + %.7097 =l add %.7004, 184 + %.7098 =w copy 7 + storew %.7098, %.7097 + %.7099 =l add %.7004, 188 + %.7100 =w copy 7 + storew %.7100, %.7099 + %.7101 =l add %.7004, 192 + %.7102 =w copy 18446744073709551615 + storew %.7102, %.7101 + %.7103 =l add %.7004, 196 + %.7104 =w copy 2044415521 + storew %.7104, %.7103 + %.7105 =l add %.7004, 200 + %.7106 =w copy 18446744073709551608 + storew %.7106, %.7105 + %.7107 =l add 
%.7004, 204 + %.7108 =w copy 0 + storew %.7108, %.7107 + %.7109 =l add %.7004, 208 + %.7110 =w copy 3827000415 + storew %.7110, %.7109 + %.7111 =l add %.7004, 212 + %.7112 =w copy 2981231114 + storew %.7112, %.7111 + %.7113 =l add %.7004, 216 + %.7114 =w copy 4121401781 + storew %.7114, %.7113 + %.7115 =l add %.7004, 220 + %.7116 =w copy 18446744073709551608 + storew %.7116, %.7115 + %.7117 =l add %.7004, 224 + %.7118 =w copy 18446744073709551613 + storew %.7118, %.7117 + %.7119 =l add %.7004, 228 + %.7120 =w copy 18446744073709551615 + storew %.7120, %.7119 + %.7121 =l add %.7004, 232 + %.7122 =w copy 18446744073709551611 + storew %.7122, %.7121 + %.7123 =l add %.7004, 236 + %.7124 =w copy 18446744073709551608 + storew %.7124, %.7123 + %.7125 =l add %.7004, 240 + %.7126 =w copy 18446744073709551609 + storew %.7126, %.7125 + %.7127 =l add %.7004, 244 + %.7128 =w copy 18446744073709551606 + storew %.7128, %.7127 + %.7129 =l add %.7004, 248 + %.7130 =w copy 0 + storew %.7130, %.7129 + %.7131 =l add %.7004, 252 + %.7132 =w copy 1925250850 + storew %.7132, %.7131 + %.7133 =l add %.7004, 256 + %.7134 =w copy 2934917713 + storew %.7134, %.7133 + %.7135 =l add %.7004, 260 + %.7136 =w copy 5 + storew %.7136, %.7135 + %.7137 =l add %.7004, 264 + %.7138 =w copy 18446744073709551615 + storew %.7138, %.7137 + %.7139 =l add %.7004, 268 + %.7140 =w copy 3418309949 + storew %.7140, %.7139 + %.7141 =l add %.7004, 272 + %.7142 =w copy 4073918674 + storew %.7142, %.7141 + %.7143 =l add %.7004, 276 + %.7144 =w copy 18446744073709551615 + storew %.7144, %.7143 + %.7145 =l add %.7004, 280 + %.7146 =w copy 2 + storew %.7146, %.7145 + %.7147 =l add %.7004, 284 + %.7148 =w copy 1639435908 + storew %.7148, %.7147 + %.7149 =l add %.7004, 288 + %.7150 =w copy 1827016989 + storew %.7150, %.7149 + %.7151 =l add %.7004, 292 + %.7152 =w copy 644777404 + storew %.7152, %.7151 + %.7153 =l add %.7004, 296 + %.7154 =w copy 940826840 + storew %.7154, %.7153 + %.7155 =l add %.7004, 300 + %.7156 =w 
copy 18446744073709551613 + storew %.7156, %.7155 + %.7157 =l add %.7004, 304 + %.7158 =w copy 18446744073709551613 + storew %.7158, %.7157 + %.7159 =l add %.7004, 308 + %.7160 =w copy 749146208 + storew %.7160, %.7159 + %.7161 =l add %.7004, 312 + %.7162 =w copy 18446744073709551606 + storew %.7162, %.7161 + %.7163 =l add %.7004, 316 + %.7164 =w copy 737796084 + storew %.7164, %.7163 + %.7165 =l add %.7004, 320 + %.7166 =w copy 18446744073709551606 + storew %.7166, %.7165 + %.7167 =l add %.7004, 324 + %.7168 =w copy 749146208 + storew %.7168, %.7167 + %.7169 =l add %.7004, 328 + %.7170 =w copy 1768884348 + storew %.7170, %.7169 + %.7171 =l add %.7004, 332 + %.7172 =w copy 5 + storew %.7172, %.7171 + %.7173 =l add %.7004, 336 + %.7174 =w copy 4037700173 + storew %.7174, %.7173 + %.7175 =l add %.7004, 340 + %.7176 =w copy 18446744073709551609 + storew %.7176, %.7175 + %.7177 =l add %.7004, 344 + %.7178 =w copy 18446744073709551615 + storew %.7178, %.7177 + %.7179 =l add %.7004, 348 + %.7180 =w copy 3644077451 + storew %.7180, %.7179 + %.7181 =l add %.7004, 352 + %.7182 =w copy 1 + storew %.7182, %.7181 + %.7183 =l add %.7004, 356 + %.7184 =w copy 18446744073709551615 + storew %.7184, %.7183 + %.7185 =l add %.7004, 360 + %.7186 =w copy 18446744073709551609 + storew %.7186, %.7185 + %.7187 =l add %.7004, 364 + %.7188 =w copy 2934917713 + storew %.7188, %.7187 + %.7189 =l add %.7004, 368 + %.7190 =w copy 2 + storew %.7190, %.7189 + %.7191 =l add %.7004, 372 + %.7192 =w copy 3155281286 + storew %.7192, %.7191 + %.7193 =l add %.7004, 376 + %.7194 =w copy 3643049425 + storew %.7194, %.7193 + %.7195 =l add %.7004, 380 + %.7196 =w copy 1340931701 + storew %.7196, %.7195 + %.7197 =l add %.7004, 384 + %.7198 =w copy 18446744073709551606 + storew %.7198, %.7197 + %.7199 =l add %.7004, 388 + %.7200 =w copy 1 + storew %.7200, %.7199 + %.7201 =l add %.7004, 392 + %.7202 =w copy 4037700173 + storew %.7202, %.7201 + %.7203 =l add %.7004, 396 + %.7204 =w copy 3644077451 + storew 
%.7204, %.7203 + %.7205 =l add %.7004, 400 + %.7206 =w copy 2934917713 + storew %.7206, %.7205 + %.7207 =l add %.7004, 404 + %.7208 =w copy 1 + storew %.7208, %.7207 + %.7209 =l add %.7004, 408 + %.7210 =w copy 18446744073709551615 + storew %.7210, %.7209 + %.7211 =l add %.7004, 412 + %.7212 =w copy 0 + storew %.7212, %.7211 + %.7213 =l add %.7004, 416 + %.7214 =w copy 3218957464 + storew %.7214, %.7213 + %.7215 =l add %.7004, 420 + %.7216 =w copy 749146208 + storew %.7216, %.7215 + %.7217 =l add %.7004, 424 + %.7218 =w copy 7 + storew %.7218, %.7217 + %.7219 =l add %.7004, 428 + %.7220 =w copy 2125129727 + storew %.7220, %.7219 + %.7221 =l add %.7004, 432 + %.7222 =w copy 1463937332 + storew %.7222, %.7221 + %.7223 =l add %.7004, 436 + %.7224 =w copy 2 + storew %.7224, %.7223 + %.7225 =l add %.7004, 440 + %.7226 =w copy 36518684 + storew %.7226, %.7225 + %.7227 =l add %.7004, 444 + %.7228 =w copy 1691684583 + storew %.7228, %.7227 + %.7229 =l add %.7004, 448 + %.7230 =w copy 2 + storew %.7230, %.7229 + %.7231 =l add %.7004, 452 + %.7232 =w copy 18446744073709551615 + storew %.7232, %.7231 + %.7233 =l add %.7004, 456 + %.7234 =w copy 18446744073709551613 + storew %.7234, %.7233 + %.7235 =l add %.7004, 460 + %.7236 =w copy 7 + storew %.7236, %.7235 + %.7237 =l add %.7004, 464 + %.7238 =w copy 18446744073709551613 + storew %.7238, %.7237 + %.7239 =l add %.7004, 468 + %.7240 =w copy 18446744073709551615 + storew %.7240, %.7239 + %.7241 =l add %.7004, 472 + %.7242 =w copy 940826840 + storew %.7242, %.7241 + %.7243 =l add %.7004, 476 + %.7244 =w copy 1925250850 + storew %.7244, %.7243 + %.7245 =l add %.7004, 480 + %.7246 =w copy 1340931701 + storew %.7246, %.7245 + %.7247 =l add %.7004, 484 + %.7248 =w copy 18446744073709551615 + storew %.7248, %.7247 + %.7249 =l add %.7004, 488 + %.7250 =w copy 1827016989 + storew %.7250, %.7249 + %.7251 =l add %.7004, 492 + %.7252 =w copy 2256088511 + storew %.7252, %.7251 + %.7253 =l add %.7004, 496 + %.7254 =w copy 2058348708 + 
storew %.7254, %.7253 + %.7255 =l add %.7004, 500 + %.7256 =w copy 411449477 + storew %.7256, %.7255 + %.7257 =l add %.7004, 504 + %.7258 =w copy 18446744073709551613 + storew %.7258, %.7257 + %.7259 =l add %.7004, 508 + %.7260 =w copy 1 + storew %.7260, %.7259 + %.7261 =l add %.7004, 512 + %.7262 =w copy 18446744073709551606 + storew %.7262, %.7261 + %.7263 =l add %.7004, 516 + %.7264 =w copy 1 + storew %.7264, %.7263 + %.7265 =l add %.7004, 520 + %.7266 =w copy 0 + storew %.7266, %.7265 + %.7267 =l add %.7004, 524 + %.7268 =w copy 18446744073709551613 + storew %.7268, %.7267 + %.7269 =l add %.7004, 528 + %.7270 =w copy 4183864120 + storew %.7270, %.7269 + %.7271 =l add %.7004, 532 + %.7272 =w copy 2044415521 + storew %.7272, %.7271 + %.7273 =l add %.7004, 536 + %.7274 =w copy 2256088511 + storew %.7274, %.7273 + %.7275 =l add %.7004, 540 + %.7276 =w copy 1 + storew %.7276, %.7275 + %.7277 =l add %.7004, 544 + %.7278 =w copy 5 + storew %.7278, %.7277 + %.7279 =l add %.7004, 548 + %.7280 =w copy 4183864120 + storew %.7280, %.7279 + %.7281 =l add %.7004, 552 + %.7282 =w copy 18446744073709551615 + storew %.7282, %.7281 + %.7283 =l add %.7004, 556 + %.7284 =w copy 3644077451 + storew %.7284, %.7283 + %.7285 =l add %.7004, 560 + %.7286 =w copy 18446744073709551608 + storew %.7286, %.7285 + %.7287 =l add %.7004, 564 + %.7288 =w copy 18446744073709551615 + storew %.7288, %.7287 + %.7289 =l add %.7004, 568 + %.7290 =w copy 7 + storew %.7290, %.7289 + %.7291 =l add %.7004, 572 + %.7292 =w copy 18446744073709551613 + storew %.7292, %.7291 + %.7293 =l add %.7004, 576 + %.7294 =w copy 1827016989 + storew %.7294, %.7293 + %.7295 =l add %.7004, 580 + %.7296 =w copy 935585686 + storew %.7296, %.7295 + %.7297 =l add %.7004, 584 + %.7298 =w copy 1691684583 + storew %.7298, %.7297 + %.7299 =l add %.7004, 588 + %.7300 =w copy 680997031 + storew %.7300, %.7299 + %.7301 =l add %.7004, 592 + %.7302 =w copy 7 + storew %.7302, %.7301 + %.7303 =l add %.7004, 596 + %.7304 =w copy 
921227315 + storew %.7304, %.7303 + %.7305 =l add %.7004, 600 + %.7306 =w copy 2125129727 + storew %.7306, %.7305 + %.7307 =l add %.7004, 604 + %.7308 =w copy 749146208 + storew %.7308, %.7307 + %.7309 =l add %.7004, 608 + %.7310 =w copy 18446744073709551613 + storew %.7310, %.7309 + %.7311 =l add %.7004, 612 + %.7312 =w copy 2934917713 + storew %.7312, %.7311 + %.7313 =l add %.7004, 616 + %.7314 =w copy 5 + storew %.7314, %.7313 + %.7315 =l add %.7004, 620 + %.7316 =w copy 0 + storew %.7316, %.7315 + %.7317 =l add %.7004, 624 + %.7318 =w copy 258848418 + storew %.7318, %.7317 + %.7319 =l add %.7004, 628 + %.7320 =w copy 18446744073709551615 + storew %.7320, %.7319 + %.7321 =l add %.7004, 632 + %.7322 =w copy 18446744073709551613 + storew %.7322, %.7321 + %.7323 =l add %.7004, 636 + %.7324 =w copy 3643049425 + storew %.7324, %.7323 + %.7325 =l add %.7004, 640 + %.7326 =w copy 7 + storew %.7326, %.7325 + %.7327 =l add %.7004, 644 + %.7328 =w copy 1 + storew %.7328, %.7327 + %.7329 =l add %.7004, 648 + %.7330 =w copy 18446744073709551609 + storew %.7330, %.7329 + %.7331 =l add %.7004, 652 + %.7332 =w copy 18446744073709551609 + storew %.7332, %.7331 + %.7333 =l add %.7004, 656 + %.7334 =w copy 0 + storew %.7334, %.7333 + %.7335 =l add %.7004, 660 + %.7336 =w copy 18446744073709551608 + storew %.7336, %.7335 + %.7337 =l add %.7004, 664 + %.7338 =w copy 0 + storew %.7338, %.7337 + %.7339 =l add %.7004, 668 + %.7340 =w copy 680997031 + storew %.7340, %.7339 + %.7341 =l add %.7004, 672 + %.7342 =w copy 18446744073709551611 + storew %.7342, %.7341 + %.7343 =l add %.7004, 676 + %.7344 =w copy 0 + storew %.7344, %.7343 + %.7345 =l add %.7004, 680 + %.7346 =w copy 459369300 + storew %.7346, %.7345 + %.7347 =l add %.7004, 684 + %.7348 =w copy 3155281286 + storew %.7348, %.7347 + %.7349 =l add %.7004, 688 + %.7350 =w copy 36518684 + storew %.7350, %.7349 + %.7351 =l add %.7004, 692 + %.7352 =w copy 18446744073709551615 + storew %.7352, %.7351 + %.7353 =l add %.7004, 696 + 
%.7354 =w copy 1750864649 + storew %.7354, %.7353 + %.7355 =l add %.7004, 700 + %.7356 =w copy 940826840 + storew %.7356, %.7355 + %.7357 =l add %.7004, 704 + %.7358 =w copy 3218957464 + storew %.7358, %.7357 + %.7359 =l add %.7004, 708 + %.7360 =w copy 0 + storew %.7360, %.7359 + %.7361 =l add %.7004, 712 + %.7362 =w copy 18446744073709551615 + storew %.7362, %.7361 + %.7363 =l add %.7004, 716 + %.7364 =w copy 1 + storew %.7364, %.7363 + %.7365 =l add %.7004, 720 + %.7366 =w copy 2934917713 + storew %.7366, %.7365 + %.7367 =l add %.7004, 724 + %.7368 =w copy 3644077451 + storew %.7368, %.7367 + %.7369 =l add %.7004, 728 + %.7370 =w copy 5 + storew %.7370, %.7369 + %.7371 =l add %.7004, 732 + %.7372 =w copy 2219850352 + storew %.7372, %.7371 + %.7373 =l add %.7004, 736 + %.7374 =w copy 2 + storew %.7374, %.7373 + %.7375 =l add %.7004, 740 + %.7376 =w copy 18446744073709551610 + storew %.7376, %.7375 + %.7377 =l add %.7004, 744 + %.7378 =w copy 3218957464 + storew %.7378, %.7377 + %.7379 =l add %.7004, 748 + %.7380 =w copy 2 + storew %.7380, %.7379 + %.7381 =l add %.7004, 752 + %.7382 =w copy 0 + storew %.7382, %.7381 + %.7383 =l add %.7004, 756 + %.7384 =w copy 927038418 + storew %.7384, %.7383 + %.7385 =l add %.7004, 760 + %.7386 =w copy 18446744073709551608 + storew %.7386, %.7385 + %.7387 =l add %.7004, 764 + %.7388 =w copy 3739975818 + storew %.7388, %.7387 + %.7389 =l add %.7004, 768 + %.7390 =w copy 18446744073709551612 + storew %.7390, %.7389 + %.7391 =l add %.7004, 772 + %.7392 =w copy 0 + storew %.7392, %.7391 + %.7393 =l add %.7004, 776 + %.7394 =w copy 737796084 + storew %.7394, %.7393 + %.7395 =l add %.7004, 780 + %.7396 =w copy 18446744073709551615 + storew %.7396, %.7395 + %.7397 =l add %.7004, 784 + %.7398 =w copy 1 + storew %.7398, %.7397 + %.7399 =l add %.7004, 788 + %.7400 =w copy 2764042410 + storew %.7400, %.7399 + %.7401 =l add %.7004, 792 + %.7402 =w copy 18446744073709551611 + storew %.7402, %.7401 + %.7403 =l add %.7004, 796 + %.7404 =w copy 
411449477 + storew %.7404, %.7403 + %.7405 =l add %.7004, 800 + %.7406 =w copy 1691684583 + storew %.7406, %.7405 + %.7407 =l add %.7004, 804 + %.7408 =w copy 2389908307 + storew %.7408, %.7407 + %.7409 =l add %.7004, 808 + %.7410 =w copy 1691684583 + storew %.7410, %.7409 + %.7411 =l add %.7004, 812 + %.7412 =w copy 3739975818 + storew %.7412, %.7411 + %.7413 =l add %.7004, 816 + %.7414 =w copy 4121401781 + storew %.7414, %.7413 + %.7415 =l add %.7004, 820 + %.7416 =w copy 4121401781 + storew %.7416, %.7415 + %.7417 =l add %.7004, 824 + %.7418 =w copy 3739975818 + storew %.7418, %.7417 + %.7419 =l add %.7004, 828 + %.7420 =w copy 18446744073709551613 + storew %.7420, %.7419 + %.7421 =l add %.7004, 832 + %.7422 =w copy 644777404 + storew %.7422, %.7421 + %.7423 =l add %.7004, 836 + %.7424 =w copy 1925250850 + storew %.7424, %.7423 + %.7425 =l add %.7004, 840 + %.7426 =w copy 3681951840 + storew %.7426, %.7425 + %.7427 =l add %.7004, 844 + %.7428 =w copy 2934917713 + storew %.7428, %.7427 + %.7429 =l add %.7004, 848 + %.7430 =w copy 2044415521 + storew %.7430, %.7429 + %.7431 =l add %.7004, 852 + %.7432 =w copy 18446744073709551609 + storew %.7432, %.7431 + %.7433 =l add %.7004, 856 + %.7434 =w copy 2389908307 + storew %.7434, %.7433 + %.7435 =l add %.7004, 860 + %.7436 =w copy 460029231 + storew %.7436, %.7435 + %.7437 =l add %.7004, 864 + %.7438 =w copy 5 + storew %.7438, %.7437 + %.7439 =l add %.7004, 868 + %.7440 =w copy 460029231 + storew %.7440, %.7439 + %.7441 =l add %.7004, 872 + %.7442 =w copy 1 + storew %.7442, %.7441 + %.7443 =l add %.7004, 876 + %.7444 =w copy 1 + storew %.7444, %.7443 + %.7445 =l add %.7004, 880 + %.7446 =w copy 18446744073709551613 + storew %.7446, %.7445 + %.7447 =l add %.7004, 884 + %.7448 =w copy 3218957464 + storew %.7448, %.7447 + %.7449 =l add %.7004, 888 + %.7450 =w copy 3681951840 + storew %.7450, %.7449 + %.7451 =l add %.7004, 892 + %.7452 =w copy 1925250850 + storew %.7452, %.7451 + %.7453 =l add %.7004, 896 + %.7454 =w copy 
644777404 + storew %.7454, %.7453 + %.7455 =l add %.7004, 900 + %.7456 =w copy 1691684583 + storew %.7456, %.7455 + %.7457 =l add %.7004, 904 + %.7458 =w copy 258848418 + storew %.7458, %.7457 + %.7459 =l add %.7004, 908 + %.7460 =w copy 0 + storew %.7460, %.7459 + %.7461 =l add %.7004, 912 + %.7462 =w copy 18446744073709551615 + storew %.7462, %.7461 + %.7463 =l add %.7004, 916 + %.7464 =w copy 3739975818 + storew %.7464, %.7463 + %.7465 =l add %.7004, 920 + %.7466 =w copy 1691684583 + storew %.7466, %.7465 + %.7467 =l add %.7004, 924 + %.7468 =w copy 2389908307 + storew %.7468, %.7467 + %.7469 =l add %.7004, 928 + %.7470 =w copy 1890112767 + storew %.7470, %.7469 + %.7471 =l add %.7004, 932 + %.7472 =w copy 1004611940 + storew %.7472, %.7471 + %.7473 =l add %.7004, 936 + %.7474 =w copy 2631950342 + storew %.7474, %.7473 + %.7475 =l add %.7004, 940 + %.7476 =w copy 1886988034 + storew %.7476, %.7475 + %.7477 =l add %.7004, 944 + %.7478 =w copy 1 + storew %.7478, %.7477 + %.7479 =l add %.7004, 948 + %.7480 =w copy 18446744073709551615 + storew %.7480, %.7479 + %.7481 =l add %.7004, 952 + %.7482 =w copy 737796084 + storew %.7482, %.7481 + %.7483 =l add %.7004, 956 + %.7484 =w copy 18446744073709551615 + storew %.7484, %.7483 + %.7485 =l add %.7004, 960 + %.7486 =w copy 18446744073709551608 + storew %.7486, %.7485 + %.7487 =l add %.7004, 964 + %.7488 =w copy 18446744073709551613 + storew %.7488, %.7487 + %.7489 =l add %.7004, 968 + %.7490 =w copy 2569114947 + storew %.7490, %.7489 + %.7491 =l add %.7004, 972 + %.7492 =w copy 927038418 + storew %.7492, %.7491 + %.7493 =l add %.7004, 976 + %.7494 =w copy 0 + storew %.7494, %.7493 + %.7495 =l add %.7004, 980 + %.7496 =w copy 2 + storew %.7496, %.7495 + %.7497 =l add %.7004, 984 + %.7498 =w copy 2934917713 + storew %.7498, %.7497 + %.7499 =l add %.7004, 988 + %.7500 =w copy 18446744073709551615 + storew %.7500, %.7499 + %.7501 =l add %.7004, 992 + %.7502 =w copy 3418309949 + storew %.7502, %.7501 + %.7503 =l add %.7004, 
996 + %.7504 =w copy 2044415521 + storew %.7504, %.7503 + %.7505 =l add %.7004, 1000 + %.7506 =w copy 5 + storew %.7506, %.7505 + %.7507 =l add %.7004, 1004 + %.7508 =w copy 3644077451 + storew %.7508, %.7507 + %.7512 =w loaduw %.5325 + %.7513 =w add %.7512, 1 + storew %.7513, %.5325 + storew 0, $g_24 +@for_cond.981 + %.7514 =w loadsw $g_24 + %.7515 =w cslew %.7514, 0 + jnz %.7515, @for_body.982, @for_join.984 +@for_body.982 + %.7517 =l add %.7516, 0 + %.7518 =w copy 2322715888 + storew %.7518, %.7517 + %.7520 =l add %.7519, 0 + %.7521 =w copy 562249091 + storew %.7521, %.7520 + %.7523 =l add %.7522, 0 + %.7524 =l copy $g_185 + %.7525 =l mul 16, 1 + %.7526 =l add %.7524, %.7525 + %.7527 =l copy %.7526 + storel %.7527, %.7523 + %.7529 =l add %.7528, 0 + %.7530 =l copy $g_185 + %.7531 =l mul 40, 1 + %.7532 =l add %.7530, %.7531 + %.7533 =l copy %.7532 + storel %.7533, %.7529 + %.7535 =l add %.7534, 0 + %.7536 =l copy $g_1183 + %.7537 =l mul 40, 1 + %.7538 =l add %.7536, %.7537 + %.7539 =l copy %.7538 + storel %.7539, %.7535 + %.7541 =l add %.7540, 0 + %.7542 =l copy $g_518 + %.7543 =l mul 16, 1 + %.7544 =l add %.7542, %.7543 + %.7545 =l copy %.7544 + storel %.7545, %.7541 + %.7547 =l add %.7546, 0 + %.7548 =l copy $g_1183 + %.7549 =l mul 16, 1 + %.7550 =l add %.7548, %.7549 + %.7551 =l copy %.7550 + storel %.7551, %.7547 + %.7553 =l add %.7552, 0 + %.7554 =l copy $g_794 + %.7555 =l mul 0, 1 + %.7556 =l add %.7554, %.7555 + %.7557 =l copy %.7556 + storel %.7557, %.7553 + %.7559 =l add %.7558, 0 + %.7560 =l extsw 0 + %.7561 =l copy %.7560 + storel %.7561, %.7559 + %.7563 =l add %.7562, 0 + %.7564 =l copy $g_518 + %.7565 =l mul 40, 1 + %.7566 =l add %.7564, %.7565 + %.7567 =l copy %.7566 + storel %.7567, %.7563 + %.7569 =l add %.7568, 0 + %.7570 =l copy $g_185 + %.7571 =l mul 48, 1 + %.7572 =l add %.7570, %.7571 + %.7573 =l copy %.7572 + storel %.7573, %.7569 + %.7575 =l add %.7574, 0 + %.7576 =l extsw 0 + %.7577 =l copy %.7576 + storel %.7577, %.7575 + %.7578 =l add 
%.7574, 8 + %.7579 =l extsw 0 + %.7580 =l copy %.7579 + storel %.7580, %.7578 + %.7581 =l add %.7574, 16 + %.7582 =l extsw 0 + %.7583 =l copy %.7582 + storel %.7583, %.7581 + %.7584 =l add %.7574, 24 + %.7585 =l extsw 0 + %.7586 =l copy %.7585 + storel %.7586, %.7584 + %.7587 =l add %.7574, 32 + %.7588 =l extsw 0 + %.7589 =l copy %.7588 + storel %.7589, %.7587 + %.7590 =l add %.7574, 40 + %.7591 =l extsw 0 + %.7592 =l copy %.7591 + storel %.7592, %.7590 + %.7593 =l add %.7574, 48 + %.7594 =l extsw 0 + %.7595 =l copy %.7594 + storel %.7595, %.7593 + %.7596 =l add %.7574, 56 + %.7597 =l extsw 0 + %.7598 =l copy %.7597 + storel %.7598, %.7596 + %.7599 =l add %.7574, 64 + %.7600 =l extsw 0 + %.7601 =l copy %.7600 + storel %.7601, %.7599 + %.7603 =w loaduw %.7516 + %.7604 =w cnew %.7603, 0 + jnz %.7604, @if_true.985, @if_false.986 +@if_true.985 + jmp @for_join.984 +@if_false.986 + %.7605 =l extsw 3 + %.7606 =l mul %.7605, 252 + %.7607 =l add %.7004, %.7606 + %.7608 =l extsw 6 + %.7609 =l mul %.7608, 28 + %.7610 =l add %.7607, %.7609 + %.7611 =l extsw 2 + %.7612 =l mul %.7611, 4 + %.7613 =l add %.7610, %.7612 + %.7614 =w loaduw %.7613 + %.7615 =w sub %.7614, 1 + storew %.7615, %.7613 +@for_cont.983 + %.7616 =w loadsw $g_24 + %.7617 =w add %.7616, 1 + storew %.7617, $g_24 + jmp @for_cond.981 +@for_join.984 +@if_join.980 + %.7618 =w loadsh %.6850 + %.7619 =w extsh %.7618 + %.7620 =l extsw 4 + %.7621 =l mul %.7620, 4 + %.7622 =l add %.6871, %.7621 + storew %.7619, %.7622 + %.7623 =l loadl %.1 + %.7624 =w loadsw %.7623 + %.7625 =l loadl $g_1038 + %.7626 =l loadl %.7625 + %.7627 =w loaduw %.7626 + %.7628 =w loadsw %.6861 + %.7629 =w or %.7624, %.7628 + storew %.7629, %.7623 +@if_join.975 + %.7630 =l extsw 1 + %.7631 =l mul %.7630, 4 + %.7632 =l add %.5202, %.7631 + %.7633 =w loadsw %.7632 + %.7634 =l extsw %.7633 + ret %.7634 +@for_cont.892 + %.7635 =l loadl $g_82 + %.7636 =l extsw 1 + %.7637 =l sub %.7635, %.7636 + storel %.7637, $g_82 + jmp @for_cond.890 +@for_join.893 
+@if_join.881 + %.7638 =w loadub %.109 + %.7639 =l extub %.7638 + ret %.7639 +} +function l $func_4(l %.1, l %.3, l %.5) { +@start.987 + %.2 =l alloc8 8 + storel %.1, %.2 + %.4 =l alloc8 8 + storel %.3, %.4 + %.6 =l alloc8 8 + storel %.5, %.6 +@body.988 + %.7 =l loadl %.2 + ret %.7 +} +function l $func_8(w %.1) { +@start.989 + %.2 =l alloc4 4 + storew %.1, %.2 + %.3 =l alloc8 8 + %.14 =l alloc8 8 + %.15 =l alloc4 20 + %.29 =l alloc8 1960 + %.877 =l alloc4 56 + %.934 =l alloc8 8 + %.936 =l alloc8 8 + %.938 =l alloc8 1568 + %.1647 =l alloc8 56 + %.1669 =l alloc8 8 + %.1675 =l alloc8 8 + %.1677 =l alloc8 8 + %.1679 =l alloc8 8 + %.1684 =l alloc8 8 + %.1688 =l alloc4 2 + %.1689 =l alloc4 2 + %.1692 =l alloc4 4 + %.1693 =l alloc4 4 + %.1694 =l alloc4 4 +@body.990 + %.4 =l add %.3, 0 + %.5 =l extsw 7 + %.6 =l mul %.5, 12 + %.7 =l add $g_13, %.6 + %.8 =l extsw 0 + %.9 =l mul %.8, 4 + %.10 =l add %.7, %.9 + %.11 =l extsw 0 + %.12 =l mul %.11, 4 + %.13 =l add %.10, %.12 + storel %.13, %.4 + %.16 =l add %.15, 0 + %.17 =l extsw 0 + %.18 =l sub %.17, 6 + %.19 =w copy %.18 + storew %.19, %.16 + %.20 =l add %.15, 4 + %.21 =w copy 0 + storew %.21, %.20 + %.22 =l add %.15, 8 + %.23 =w copy 4800 + storeh %.23, %.22 + %.24 =l add %.15, 10 + storeh 0, %.24 + %.25 =l add %.15, 12 + %.26 =w copy 2965183498 + storew %.26, %.25 + %.27 =l add %.15, 16 + %.28 =w copy 18446744073709551615 + storew %.28, %.27 + %.30 =l add %.29, 0 + %.31 =l copy $g_185 + %.32 =l mul 8, 1 + %.33 =l add %.31, %.32 + %.34 =l copy %.33 + storel %.34, %.30 + %.35 =l add %.29, 8 + %.36 =l copy $g_518 + %.37 =l mul 8, 1 + %.38 =l add %.36, %.37 + %.39 =l copy %.38 + storel %.39, %.35 + %.40 =l add %.29, 16 + %.41 =l extsw 0 + %.42 =l copy %.41 + storel %.42, %.40 + %.43 =l add %.29, 24 + %.44 =l copy $g_518 + %.45 =l mul 8, 1 + %.46 =l add %.44, %.45 + %.47 =l copy %.46 + storel %.47, %.43 + %.48 =l add %.29, 32 + %.49 =l extsw 0 + %.50 =l copy %.49 + storel %.50, %.48 + %.51 =l add %.29, 40 + %.52 =l extsw 0 + 
%.53 =l copy %.52 + storel %.53, %.51 + %.54 =l add %.29, 48 + %.55 =l copy $g_265 + %.56 =l mul 8, 1 + %.57 =l add %.55, %.56 + %.58 =l copy %.57 + storel %.58, %.54 + %.59 =l add %.29, 56 + storel $g_82, %.59 + %.60 =l add %.29, 64 + %.61 =l copy $g_518 + %.62 =l mul 8, 1 + %.63 =l add %.61, %.62 + %.64 =l copy %.63 + storel %.64, %.60 + %.65 =l add %.29, 72 + %.66 =l extsw 0 + %.67 =l copy %.66 + storel %.67, %.65 + %.68 =l add %.29, 80 + storel $g_82, %.68 + %.69 =l add %.29, 88 + storel $g_80, %.69 + %.70 =l add %.29, 96 + storel $g_82, %.70 + %.71 =l add %.29, 104 + %.72 =l extsw 0 + %.73 =l copy %.72 + storel %.73, %.71 + %.74 =l add %.29, 112 + %.75 =l copy $g_1183 + %.76 =l mul 8, 1 + %.77 =l add %.75, %.76 + %.78 =l copy %.77 + storel %.78, %.74 + %.79 =l add %.29, 120 + %.80 =l extsw 0 + %.81 =l copy %.80 + storel %.81, %.79 + %.82 =l add %.29, 128 + %.83 =l copy $g_265 + %.84 =l mul 8, 1 + %.85 =l add %.83, %.84 + %.86 =l copy %.85 + storel %.86, %.82 + %.87 =l add %.29, 136 + %.88 =l copy $g_185 + %.89 =l mul 8, 1 + %.90 =l add %.88, %.89 + %.91 =l copy %.90 + storel %.91, %.87 + %.92 =l add %.29, 144 + %.93 =l copy $g_185 + %.94 =l mul 8, 1 + %.95 =l add %.93, %.94 + %.96 =l copy %.95 + storel %.96, %.92 + %.97 =l add %.29, 152 + %.98 =l copy $g_185 + %.99 =l mul 8, 1 + %.100 =l add %.98, %.99 + %.101 =l copy %.100 + storel %.101, %.97 + %.102 =l add %.29, 160 + %.103 =l extsw 0 + %.104 =l copy %.103 + storel %.104, %.102 + %.105 =l add %.29, 168 + storel $g_80, %.105 + %.106 =l add %.29, 176 + %.107 =l extsw 0 + %.108 =l copy %.107 + storel %.108, %.106 + %.109 =l add %.29, 184 + storel $g_80, %.109 + %.110 =l add %.29, 192 + %.111 =l copy $g_518 + %.112 =l mul 8, 1 + %.113 =l add %.111, %.112 + %.114 =l copy %.113 + storel %.114, %.110 + %.115 =l add %.29, 200 + %.116 =l copy $g_185 + %.117 =l mul 8, 1 + %.118 =l add %.116, %.117 + %.119 =l copy %.118 + storel %.119, %.115 + %.120 =l add %.29, 208 + %.121 =l copy $g_185 + %.122 =l mul 8, 1 + %.123 
=l add %.121, %.122 + %.124 =l copy %.123 + storel %.124, %.120 + %.125 =l add %.29, 216 + %.126 =l copy $g_518 + %.127 =l mul 8, 1 + %.128 =l add %.126, %.127 + %.129 =l copy %.128 + storel %.129, %.125 + %.130 =l add %.29, 224 + storel $g_80, %.130 + %.131 =l add %.29, 232 + %.132 =l copy $g_265 + %.133 =l mul 8, 1 + %.134 =l add %.132, %.133 + %.135 =l copy %.134 + storel %.135, %.131 + %.136 =l add %.29, 240 + storel $g_80, %.136 + %.137 =l add %.29, 248 + %.138 =l copy $g_185 + %.139 =l mul 8, 1 + %.140 =l add %.138, %.139 + %.141 =l copy %.140 + storel %.141, %.137 + %.142 =l add %.29, 256 + %.143 =l copy $g_185 + %.144 =l mul 8, 1 + %.145 =l add %.143, %.144 + %.146 =l copy %.145 + storel %.146, %.142 + %.147 =l add %.29, 264 + %.148 =l copy $g_1183 + %.149 =l mul 8, 1 + %.150 =l add %.148, %.149 + %.151 =l copy %.150 + storel %.151, %.147 + %.152 =l add %.29, 272 + %.153 =l copy $g_518 + %.154 =l mul 8, 1 + %.155 =l add %.153, %.154 + %.156 =l copy %.155 + storel %.156, %.152 + %.157 =l add %.29, 280 + storel $g_80, %.157 + %.158 =l add %.29, 288 + storel $g_80, %.158 + %.159 =l add %.29, 296 + %.160 =l copy $g_518 + %.161 =l mul 8, 1 + %.162 =l add %.160, %.161 + %.163 =l copy %.162 + storel %.163, %.159 + %.164 =l add %.29, 304 + %.165 =l copy $g_518 + %.166 =l mul 8, 1 + %.167 =l add %.165, %.166 + %.168 =l copy %.167 + storel %.168, %.164 + %.169 =l add %.29, 312 + %.170 =l copy $g_518 + %.171 =l mul 8, 1 + %.172 =l add %.170, %.171 + %.173 =l copy %.172 + storel %.173, %.169 + %.174 =l add %.29, 320 + %.175 =l copy $g_185 + %.176 =l mul 8, 1 + %.177 =l add %.175, %.176 + %.178 =l copy %.177 + storel %.178, %.174 + %.179 =l add %.29, 328 + %.180 =l extsw 0 + %.181 =l copy %.180 + storel %.181, %.179 + %.182 =l add %.29, 336 + %.183 =l copy $g_1183 + %.184 =l mul 8, 1 + %.185 =l add %.183, %.184 + %.186 =l copy %.185 + storel %.186, %.182 + %.187 =l add %.29, 344 + %.188 =l copy $g_265 + %.189 =l mul 8, 1 + %.190 =l add %.188, %.189 + %.191 =l copy %.190 
+ storel %.191, %.187 + %.192 =l add %.29, 352 + %.193 =l copy $g_185 + %.194 =l mul 8, 1 + %.195 =l add %.193, %.194 + %.196 =l copy %.195 + storel %.196, %.192 + %.197 =l add %.29, 360 + %.198 =l copy $g_1183 + %.199 =l mul 8, 1 + %.200 =l add %.198, %.199 + %.201 =l copy %.200 + storel %.201, %.197 + %.202 =l add %.29, 368 + %.203 =l extsw 0 + %.204 =l copy %.203 + storel %.204, %.202 + %.205 =l add %.29, 376 + %.206 =l copy $g_1183 + %.207 =l mul 8, 1 + %.208 =l add %.206, %.207 + %.209 =l copy %.208 + storel %.209, %.205 + %.210 =l add %.29, 384 + %.211 =l copy $g_185 + %.212 =l mul 8, 1 + %.213 =l add %.211, %.212 + %.214 =l copy %.213 + storel %.214, %.210 + %.215 =l add %.29, 392 + storel $g_80, %.215 + %.216 =l add %.29, 400 + %.217 =l extsw 0 + %.218 =l copy %.217 + storel %.218, %.216 + %.219 =l add %.29, 408 + %.220 =l extsw 0 + %.221 =l copy %.220 + storel %.221, %.219 + %.222 =l add %.29, 416 + %.223 =l copy $g_185 + %.224 =l mul 8, 1 + %.225 =l add %.223, %.224 + %.226 =l copy %.225 + storel %.226, %.222 + %.227 =l add %.29, 424 + %.228 =l copy $g_185 + %.229 =l mul 8, 1 + %.230 =l add %.228, %.229 + %.231 =l copy %.230 + storel %.231, %.227 + %.232 =l add %.29, 432 + %.233 =l copy $g_185 + %.234 =l mul 8, 1 + %.235 =l add %.233, %.234 + %.236 =l copy %.235 + storel %.236, %.232 + %.237 =l add %.29, 440 + %.238 =l extsw 0 + %.239 =l copy %.238 + storel %.239, %.237 + %.240 =l add %.29, 448 + %.241 =l copy $g_185 + %.242 =l mul 8, 1 + %.243 =l add %.241, %.242 + %.244 =l copy %.243 + storel %.244, %.240 + %.245 =l add %.29, 456 + %.246 =l copy $g_185 + %.247 =l mul 8, 1 + %.248 =l add %.246, %.247 + %.249 =l copy %.248 + storel %.249, %.245 + %.250 =l add %.29, 464 + %.251 =l extsw 0 + %.252 =l copy %.251 + storel %.252, %.250 + %.253 =l add %.29, 472 + %.254 =l copy $g_185 + %.255 =l mul 8, 1 + %.256 =l add %.254, %.255 + %.257 =l copy %.256 + storel %.257, %.253 + %.258 =l add %.29, 480 + %.259 =l copy $g_1183 + %.260 =l mul 8, 1 + %.261 =l add 
%.259, %.260 + %.262 =l copy %.261 + storel %.262, %.258 + %.263 =l add %.29, 488 + %.264 =l copy $g_185 + %.265 =l mul 8, 1 + %.266 =l add %.264, %.265 + %.267 =l copy %.266 + storel %.267, %.263 + %.268 =l add %.29, 496 + %.269 =l copy $g_265 + %.270 =l mul 8, 1 + %.271 =l add %.269, %.270 + %.272 =l copy %.271 + storel %.272, %.268 + %.273 =l add %.29, 504 + %.274 =l extsw 0 + %.275 =l copy %.274 + storel %.275, %.273 + %.276 =l add %.29, 512 + %.277 =l copy $g_1183 + %.278 =l mul 8, 1 + %.279 =l add %.277, %.278 + %.280 =l copy %.279 + storel %.280, %.276 + %.281 =l add %.29, 520 + %.282 =l copy $g_518 + %.283 =l mul 8, 1 + %.284 =l add %.282, %.283 + %.285 =l copy %.284 + storel %.285, %.281 + %.286 =l add %.29, 528 + storel $g_80, %.286 + %.287 =l add %.29, 536 + %.288 =l extsw 0 + %.289 =l copy %.288 + storel %.289, %.287 + %.290 =l add %.29, 544 + storel $g_82, %.290 + %.291 =l add %.29, 552 + %.292 =l copy $g_518 + %.293 =l mul 8, 1 + %.294 =l add %.292, %.293 + %.295 =l copy %.294 + storel %.295, %.291 + %.296 =l add %.29, 560 + %.297 =l extsw 0 + %.298 =l copy %.297 + storel %.298, %.296 + %.299 =l add %.29, 568 + storel $g_80, %.299 + %.300 =l add %.29, 576 + %.301 =l copy $g_1183 + %.302 =l mul 8, 1 + %.303 =l add %.301, %.302 + %.304 =l copy %.303 + storel %.304, %.300 + %.305 =l add %.29, 584 + %.306 =l copy $g_1183 + %.307 =l mul 8, 1 + %.308 =l add %.306, %.307 + %.309 =l copy %.308 + storel %.309, %.305 + %.310 =l add %.29, 592 + storel $g_80, %.310 + %.311 =l add %.29, 600 + %.312 =l extsw 0 + %.313 =l copy %.312 + storel %.313, %.311 + %.314 =l add %.29, 608 + %.315 =l copy $g_518 + %.316 =l mul 8, 1 + %.317 =l add %.315, %.316 + %.318 =l copy %.317 + storel %.318, %.314 + %.319 =l add %.29, 616 + %.320 =l extsw 0 + %.321 =l copy %.320 + storel %.321, %.319 + %.322 =l add %.29, 624 + storel $g_80, %.322 + %.323 =l add %.29, 632 + %.324 =l copy $g_185 + %.325 =l mul 8, 1 + %.326 =l add %.324, %.325 + %.327 =l copy %.326 + storel %.327, %.323 + 
%.328 =l add %.29, 640 + %.329 =l copy $g_518 + %.330 =l mul 8, 1 + %.331 =l add %.329, %.330 + %.332 =l copy %.331 + storel %.332, %.328 + %.333 =l add %.29, 648 + %.334 =l extsw 0 + %.335 =l copy %.334 + storel %.335, %.333 + %.336 =l add %.29, 656 + storel $g_82, %.336 + %.337 =l add %.29, 664 + storel $g_80, %.337 + %.338 =l add %.29, 672 + %.339 =l copy $g_1183 + %.340 =l mul 8, 1 + %.341 =l add %.339, %.340 + %.342 =l copy %.341 + storel %.342, %.338 + %.343 =l add %.29, 680 + %.344 =l copy $g_185 + %.345 =l mul 8, 1 + %.346 =l add %.344, %.345 + %.347 =l copy %.346 + storel %.347, %.343 + %.348 =l add %.29, 688 + %.349 =l copy $g_1183 + %.350 =l mul 8, 1 + %.351 =l add %.349, %.350 + %.352 =l copy %.351 + storel %.352, %.348 + %.353 =l add %.29, 696 + %.354 =l extsw 0 + %.355 =l copy %.354 + storel %.355, %.353 + %.356 =l add %.29, 704 + %.357 =l copy $g_518 + %.358 =l mul 8, 1 + %.359 =l add %.357, %.358 + %.360 =l copy %.359 + storel %.360, %.356 + %.361 =l add %.29, 712 + %.362 =l copy $g_265 + %.363 =l mul 8, 1 + %.364 =l add %.362, %.363 + %.365 =l copy %.364 + storel %.365, %.361 + %.366 =l add %.29, 720 + %.367 =l copy $g_185 + %.368 =l mul 8, 1 + %.369 =l add %.367, %.368 + %.370 =l copy %.369 + storel %.370, %.366 + %.371 =l add %.29, 728 + %.372 =l extsw 0 + %.373 =l copy %.372 + storel %.373, %.371 + %.374 =l add %.29, 736 + %.375 =l copy $g_518 + %.376 =l mul 8, 1 + %.377 =l add %.375, %.376 + %.378 =l copy %.377 + storel %.378, %.374 + %.379 =l add %.29, 744 + %.380 =l copy $g_1183 + %.381 =l mul 8, 1 + %.382 =l add %.380, %.381 + %.383 =l copy %.382 + storel %.383, %.379 + %.384 =l add %.29, 752 + %.385 =l copy $g_518 + %.386 =l mul 8, 1 + %.387 =l add %.385, %.386 + %.388 =l copy %.387 + storel %.388, %.384 + %.389 =l add %.29, 760 + %.390 =l extsw 0 + %.391 =l copy %.390 + storel %.391, %.389 + %.392 =l add %.29, 768 + %.393 =l copy $g_185 + %.394 =l mul 8, 1 + %.395 =l add %.393, %.394 + %.396 =l copy %.395 + storel %.396, %.392 + %.397 =l 
add %.29, 776 + storel $g_82, %.397 + %.398 =l add %.29, 784 + %.399 =l extsw 0 + %.400 =l copy %.399 + storel %.400, %.398 + %.401 =l add %.29, 792 + %.402 =l extsw 0 + %.403 =l copy %.402 + storel %.403, %.401 + %.404 =l add %.29, 800 + %.405 =l copy $g_185 + %.406 =l mul 8, 1 + %.407 =l add %.405, %.406 + %.408 =l copy %.407 + storel %.408, %.404 + %.409 =l add %.29, 808 + storel $g_80, %.409 + %.410 =l add %.29, 816 + %.411 =l copy $g_1183 + %.412 =l mul 8, 1 + %.413 =l add %.411, %.412 + %.414 =l copy %.413 + storel %.414, %.410 + %.415 =l add %.29, 824 + %.416 =l copy $g_185 + %.417 =l mul 8, 1 + %.418 =l add %.416, %.417 + %.419 =l copy %.418 + storel %.419, %.415 + %.420 =l add %.29, 832 + storel $g_82, %.420 + %.421 =l add %.29, 840 + storel $g_82, %.421 + %.422 =l add %.29, 848 + storel $g_80, %.422 + %.423 =l add %.29, 856 + %.424 =l copy $g_265 + %.425 =l mul 8, 1 + %.426 =l add %.424, %.425 + %.427 =l copy %.426 + storel %.427, %.423 + %.428 =l add %.29, 864 + %.429 =l copy $g_1183 + %.430 =l mul 8, 1 + %.431 =l add %.429, %.430 + %.432 =l copy %.431 + storel %.432, %.428 + %.433 =l add %.29, 872 + %.434 =l copy $g_518 + %.435 =l mul 8, 1 + %.436 =l add %.434, %.435 + %.437 =l copy %.436 + storel %.437, %.433 + %.438 =l add %.29, 880 + storel $g_82, %.438 + %.439 =l add %.29, 888 + storel $g_80, %.439 + %.440 =l add %.29, 896 + %.441 =l extsw 0 + %.442 =l copy %.441 + storel %.442, %.440 + %.443 =l add %.29, 904 + storel $g_80, %.443 + %.444 =l add %.29, 912 + storel $g_82, %.444 + %.445 =l add %.29, 920 + %.446 =l copy $g_265 + %.447 =l mul 8, 1 + %.448 =l add %.446, %.447 + %.449 =l copy %.448 + storel %.449, %.445 + %.450 =l add %.29, 928 + storel $g_82, %.450 + %.451 =l add %.29, 936 + %.452 =l copy $g_265 + %.453 =l mul 8, 1 + %.454 =l add %.452, %.453 + %.455 =l copy %.454 + storel %.455, %.451 + %.456 =l add %.29, 944 + storel $g_82, %.456 + %.457 =l add %.29, 952 + storel $g_82, %.457 + %.458 =l add %.29, 960 + storel $g_82, %.458 + %.459 =l 
add %.29, 968 + %.460 =l copy $g_518 + %.461 =l mul 8, 1 + %.462 =l add %.460, %.461 + %.463 =l copy %.462 + storel %.463, %.459 + %.464 =l add %.29, 976 + %.465 =l copy $g_1183 + %.466 =l mul 8, 1 + %.467 =l add %.465, %.466 + %.468 =l copy %.467 + storel %.468, %.464 + %.469 =l add %.29, 984 + storel $g_82, %.469 + %.470 =l add %.29, 992 + %.471 =l copy $g_185 + %.472 =l mul 8, 1 + %.473 =l add %.471, %.472 + %.474 =l copy %.473 + storel %.474, %.470 + %.475 =l add %.29, 1000 + %.476 =l extsw 0 + %.477 =l copy %.476 + storel %.477, %.475 + %.478 =l add %.29, 1008 + %.479 =l copy $g_265 + %.480 =l mul 8, 1 + %.481 =l add %.479, %.480 + %.482 =l copy %.481 + storel %.482, %.478 + %.483 =l add %.29, 1016 + storel $g_82, %.483 + %.484 =l add %.29, 1024 + %.485 =l copy $g_1183 + %.486 =l mul 8, 1 + %.487 =l add %.485, %.486 + %.488 =l copy %.487 + storel %.488, %.484 + %.489 =l add %.29, 1032 + %.490 =l copy $g_1183 + %.491 =l mul 8, 1 + %.492 =l add %.490, %.491 + %.493 =l copy %.492 + storel %.493, %.489 + %.494 =l add %.29, 1040 + %.495 =l copy $g_265 + %.496 =l mul 8, 1 + %.497 =l add %.495, %.496 + %.498 =l copy %.497 + storel %.498, %.494 + %.499 =l add %.29, 1048 + %.500 =l copy $g_265 + %.501 =l mul 8, 1 + %.502 =l add %.500, %.501 + %.503 =l copy %.502 + storel %.503, %.499 + %.504 =l add %.29, 1056 + %.505 =l copy $g_1183 + %.506 =l mul 8, 1 + %.507 =l add %.505, %.506 + %.508 =l copy %.507 + storel %.508, %.504 + %.509 =l add %.29, 1064 + %.510 =l copy $g_1183 + %.511 =l mul 8, 1 + %.512 =l add %.510, %.511 + %.513 =l copy %.512 + storel %.513, %.509 + %.514 =l add %.29, 1072 + %.515 =l copy $g_185 + %.516 =l mul 8, 1 + %.517 =l add %.515, %.516 + %.518 =l copy %.517 + storel %.518, %.514 + %.519 =l add %.29, 1080 + storel $g_82, %.519 + %.520 =l add %.29, 1088 + %.521 =l copy $g_518 + %.522 =l mul 8, 1 + %.523 =l add %.521, %.522 + %.524 =l copy %.523 + storel %.524, %.520 + %.525 =l add %.29, 1096 + storel $g_82, %.525 + %.526 =l add %.29, 1104 + %.527 =l 
copy $g_1183 + %.528 =l mul 8, 1 + %.529 =l add %.527, %.528 + %.530 =l copy %.529 + storel %.530, %.526 + %.531 =l add %.29, 1112 + storel $g_80, %.531 + %.532 =l add %.29, 1120 + storel $g_80, %.532 + %.533 =l add %.29, 1128 + %.534 =l copy $g_185 + %.535 =l mul 8, 1 + %.536 =l add %.534, %.535 + %.537 =l copy %.536 + storel %.537, %.533 + %.538 =l add %.29, 1136 + %.539 =l copy $g_518 + %.540 =l mul 8, 1 + %.541 =l add %.539, %.540 + %.542 =l copy %.541 + storel %.542, %.538 + %.543 =l add %.29, 1144 + %.544 =l copy $g_518 + %.545 =l mul 8, 1 + %.546 =l add %.544, %.545 + %.547 =l copy %.546 + storel %.547, %.543 + %.548 =l add %.29, 1152 + storel $g_82, %.548 + %.549 =l add %.29, 1160 + %.550 =l copy $g_1183 + %.551 =l mul 8, 1 + %.552 =l add %.550, %.551 + %.553 =l copy %.552 + storel %.553, %.549 + %.554 =l add %.29, 1168 + %.555 =l copy $g_185 + %.556 =l mul 8, 1 + %.557 =l add %.555, %.556 + %.558 =l copy %.557 + storel %.558, %.554 + %.559 =l add %.29, 1176 + %.560 =l copy $g_185 + %.561 =l mul 8, 1 + %.562 =l add %.560, %.561 + %.563 =l copy %.562 + storel %.563, %.559 + %.564 =l add %.29, 1184 + storel $g_82, %.564 + %.565 =l add %.29, 1192 + storel $g_80, %.565 + %.566 =l add %.29, 1200 + %.567 =l copy $g_185 + %.568 =l mul 8, 1 + %.569 =l add %.567, %.568 + %.570 =l copy %.569 + storel %.570, %.566 + %.571 =l add %.29, 1208 + %.572 =l copy $g_518 + %.573 =l mul 8, 1 + %.574 =l add %.572, %.573 + %.575 =l copy %.574 + storel %.575, %.571 + %.576 =l add %.29, 1216 + storel $g_82, %.576 + %.577 =l add %.29, 1224 + storel $g_82, %.577 + %.578 =l add %.29, 1232 + %.579 =l copy $g_265 + %.580 =l mul 8, 1 + %.581 =l add %.579, %.580 + %.582 =l copy %.581 + storel %.582, %.578 + %.583 =l add %.29, 1240 + %.584 =l copy $g_265 + %.585 =l mul 8, 1 + %.586 =l add %.584, %.585 + %.587 =l copy %.586 + storel %.587, %.583 + %.588 =l add %.29, 1248 + %.589 =l copy $g_1183 + %.590 =l mul 8, 1 + %.591 =l add %.589, %.590 + %.592 =l copy %.591 + storel %.592, %.588 + 
%.593 =l add %.29, 1256 + %.594 =l copy $g_185 + %.595 =l mul 8, 1 + %.596 =l add %.594, %.595 + %.597 =l copy %.596 + storel %.597, %.593 + %.598 =l add %.29, 1264 + %.599 =l copy $g_1183 + %.600 =l mul 8, 1 + %.601 =l add %.599, %.600 + %.602 =l copy %.601 + storel %.602, %.598 + %.603 =l add %.29, 1272 + %.604 =l copy $g_265 + %.605 =l mul 8, 1 + %.606 =l add %.604, %.605 + %.607 =l copy %.606 + storel %.607, %.603 + %.608 =l add %.29, 1280 + %.609 =l copy $g_265 + %.610 =l mul 8, 1 + %.611 =l add %.609, %.610 + %.612 =l copy %.611 + storel %.612, %.608 + %.613 =l add %.29, 1288 + %.614 =l extsw 0 + %.615 =l copy %.614 + storel %.615, %.613 + %.616 =l add %.29, 1296 + storel $g_82, %.616 + %.617 =l add %.29, 1304 + %.618 =l extsw 0 + %.619 =l copy %.618 + storel %.619, %.617 + %.620 =l add %.29, 1312 + storel $g_82, %.620 + %.621 =l add %.29, 1320 + %.622 =l extsw 0 + %.623 =l copy %.622 + storel %.623, %.621 + %.624 =l add %.29, 1328 + %.625 =l copy $g_185 + %.626 =l mul 8, 1 + %.627 =l add %.625, %.626 + %.628 =l copy %.627 + storel %.628, %.624 + %.629 =l add %.29, 1336 + storel $g_82, %.629 + %.630 =l add %.29, 1344 + %.631 =l copy $g_518 + %.632 =l mul 8, 1 + %.633 =l add %.631, %.632 + %.634 =l copy %.633 + storel %.634, %.630 + %.635 =l add %.29, 1352 + %.636 =l copy $g_185 + %.637 =l mul 8, 1 + %.638 =l add %.636, %.637 + %.639 =l copy %.638 + storel %.639, %.635 + %.640 =l add %.29, 1360 + %.641 =l copy $g_265 + %.642 =l mul 8, 1 + %.643 =l add %.641, %.642 + %.644 =l copy %.643 + storel %.644, %.640 + %.645 =l add %.29, 1368 + storel $g_80, %.645 + %.646 =l add %.29, 1376 + %.647 =l copy $g_518 + %.648 =l mul 8, 1 + %.649 =l add %.647, %.648 + %.650 =l copy %.649 + storel %.650, %.646 + %.651 =l add %.29, 1384 + %.652 =l extsw 0 + %.653 =l copy %.652 + storel %.653, %.651 + %.654 =l add %.29, 1392 + storel $g_82, %.654 + %.655 =l add %.29, 1400 + storel $g_80, %.655 + %.656 =l add %.29, 1408 + storel $g_80, %.656 + %.657 =l add %.29, 1416 + storel 
$g_82, %.657 + %.658 =l add %.29, 1424 + %.659 =l copy $g_265 + %.660 =l mul 8, 1 + %.661 =l add %.659, %.660 + %.662 =l copy %.661 + storel %.662, %.658 + %.663 =l add %.29, 1432 + storel $g_80, %.663 + %.664 =l add %.29, 1440 + %.665 =l copy $g_518 + %.666 =l mul 8, 1 + %.667 =l add %.665, %.666 + %.668 =l copy %.667 + storel %.668, %.664 + %.669 =l add %.29, 1448 + storel $g_80, %.669 + %.670 =l add %.29, 1456 + %.671 =l copy $g_518 + %.672 =l mul 8, 1 + %.673 =l add %.671, %.672 + %.674 =l copy %.673 + storel %.674, %.670 + %.675 =l add %.29, 1464 + storel $g_80, %.675 + %.676 =l add %.29, 1472 + %.677 =l copy $g_1183 + %.678 =l mul 8, 1 + %.679 =l add %.677, %.678 + %.680 =l copy %.679 + storel %.680, %.676 + %.681 =l add %.29, 1480 + %.682 =l extsw 0 + %.683 =l copy %.682 + storel %.683, %.681 + %.684 =l add %.29, 1488 + %.685 =l extsw 0 + %.686 =l copy %.685 + storel %.686, %.684 + %.687 =l add %.29, 1496 + %.688 =l copy $g_185 + %.689 =l mul 8, 1 + %.690 =l add %.688, %.689 + %.691 =l copy %.690 + storel %.691, %.687 + %.692 =l add %.29, 1504 + storel $g_80, %.692 + %.693 =l add %.29, 1512 + %.694 =l extsw 0 + %.695 =l copy %.694 + storel %.695, %.693 + %.696 =l add %.29, 1520 + %.697 =l copy $g_185 + %.698 =l mul 8, 1 + %.699 =l add %.697, %.698 + %.700 =l copy %.699 + storel %.700, %.696 + %.701 =l add %.29, 1528 + storel $g_82, %.701 + %.702 =l add %.29, 1536 + %.703 =l copy $g_1183 + %.704 =l mul 8, 1 + %.705 =l add %.703, %.704 + %.706 =l copy %.705 + storel %.706, %.702 + %.707 =l add %.29, 1544 + %.708 =l copy $g_518 + %.709 =l mul 8, 1 + %.710 =l add %.708, %.709 + %.711 =l copy %.710 + storel %.711, %.707 + %.712 =l add %.29, 1552 + %.713 =l extsw 0 + %.714 =l copy %.713 + storel %.714, %.712 + %.715 =l add %.29, 1560 + %.716 =l extsw 0 + %.717 =l copy %.716 + storel %.717, %.715 + %.718 =l add %.29, 1568 + %.719 =l copy $g_265 + %.720 =l mul 8, 1 + %.721 =l add %.719, %.720 + %.722 =l copy %.721 + storel %.722, %.718 + %.723 =l add %.29, 1576 + 
storel $g_80, %.723 + %.724 =l add %.29, 1584 + %.725 =l copy $g_185 + %.726 =l mul 8, 1 + %.727 =l add %.725, %.726 + %.728 =l copy %.727 + storel %.728, %.724 + %.729 =l add %.29, 1592 + %.730 =l copy $g_518 + %.731 =l mul 8, 1 + %.732 =l add %.730, %.731 + %.733 =l copy %.732 + storel %.733, %.729 + %.734 =l add %.29, 1600 + %.735 =l copy $g_185 + %.736 =l mul 8, 1 + %.737 =l add %.735, %.736 + %.738 =l copy %.737 + storel %.738, %.734 + %.739 =l add %.29, 1608 + %.740 =l copy $g_265 + %.741 =l mul 8, 1 + %.742 =l add %.740, %.741 + %.743 =l copy %.742 + storel %.743, %.739 + %.744 =l add %.29, 1616 + storel $g_82, %.744 + %.745 =l add %.29, 1624 + %.746 =l copy $g_185 + %.747 =l mul 8, 1 + %.748 =l add %.746, %.747 + %.749 =l copy %.748 + storel %.749, %.745 + %.750 =l add %.29, 1632 + %.751 =l extsw 0 + %.752 =l copy %.751 + storel %.752, %.750 + %.753 =l add %.29, 1640 + storel $g_80, %.753 + %.754 =l add %.29, 1648 + storel $g_82, %.754 + %.755 =l add %.29, 1656 + storel $g_82, %.755 + %.756 =l add %.29, 1664 + storel $g_80, %.756 + %.757 =l add %.29, 1672 + %.758 =l extsw 0 + %.759 =l copy %.758 + storel %.759, %.757 + %.760 =l add %.29, 1680 + storel $g_80, %.760 + %.761 =l add %.29, 1688 + %.762 =l copy $g_185 + %.763 =l mul 8, 1 + %.764 =l add %.762, %.763 + %.765 =l copy %.764 + storel %.765, %.761 + %.766 =l add %.29, 1696 + %.767 =l copy $g_518 + %.768 =l mul 8, 1 + %.769 =l add %.767, %.768 + %.770 =l copy %.769 + storel %.770, %.766 + %.771 =l add %.29, 1704 + %.772 =l extsw 0 + %.773 =l copy %.772 + storel %.773, %.771 + %.774 =l add %.29, 1712 + storel $g_82, %.774 + %.775 =l add %.29, 1720 + %.776 =l copy $g_518 + %.777 =l mul 8, 1 + %.778 =l add %.776, %.777 + %.779 =l copy %.778 + storel %.779, %.775 + %.780 =l add %.29, 1728 + %.781 =l extsw 0 + %.782 =l copy %.781 + storel %.782, %.780 + %.783 =l add %.29, 1736 + %.784 =l copy $g_1183 + %.785 =l mul 8, 1 + %.786 =l add %.784, %.785 + %.787 =l copy %.786 + storel %.787, %.783 + %.788 =l add 
%.29, 1744 + %.789 =l copy $g_518 + %.790 =l mul 8, 1 + %.791 =l add %.789, %.790 + %.792 =l copy %.791 + storel %.792, %.788 + %.793 =l add %.29, 1752 + storel $g_80, %.793 + %.794 =l add %.29, 1760 + storel $g_82, %.794 + %.795 =l add %.29, 1768 + %.796 =l extsw 0 + %.797 =l copy %.796 + storel %.797, %.795 + %.798 =l add %.29, 1776 + %.799 =l copy $g_518 + %.800 =l mul 8, 1 + %.801 =l add %.799, %.800 + %.802 =l copy %.801 + storel %.802, %.798 + %.803 =l add %.29, 1784 + storel $g_82, %.803 + %.804 =l add %.29, 1792 + %.805 =l copy $g_265 + %.806 =l mul 8, 1 + %.807 =l add %.805, %.806 + %.808 =l copy %.807 + storel %.808, %.804 + %.809 =l add %.29, 1800 + %.810 =l copy $g_185 + %.811 =l mul 8, 1 + %.812 =l add %.810, %.811 + %.813 =l copy %.812 + storel %.813, %.809 + %.814 =l add %.29, 1808 + %.815 =l copy $g_1183 + %.816 =l mul 8, 1 + %.817 =l add %.815, %.816 + %.818 =l copy %.817 + storel %.818, %.814 + %.819 =l add %.29, 1816 + %.820 =l extsw 0 + %.821 =l copy %.820 + storel %.821, %.819 + %.822 =l add %.29, 1824 + %.823 =l copy $g_1183 + %.824 =l mul 8, 1 + %.825 =l add %.823, %.824 + %.826 =l copy %.825 + storel %.826, %.822 + %.827 =l add %.29, 1832 + %.828 =l copy $g_185 + %.829 =l mul 8, 1 + %.830 =l add %.828, %.829 + %.831 =l copy %.830 + storel %.831, %.827 + %.832 =l add %.29, 1840 + %.833 =l extsw 0 + %.834 =l copy %.833 + storel %.834, %.832 + %.835 =l add %.29, 1848 + storel $g_82, %.835 + %.836 =l add %.29, 1856 + storel $g_82, %.836 + %.837 =l add %.29, 1864 + %.838 =l copy $g_265 + %.839 =l mul 8, 1 + %.840 =l add %.838, %.839 + %.841 =l copy %.840 + storel %.841, %.837 + %.842 =l add %.29, 1872 + storel $g_82, %.842 + %.843 =l add %.29, 1880 + %.844 =l copy $g_265 + %.845 =l mul 8, 1 + %.846 =l add %.844, %.845 + %.847 =l copy %.846 + storel %.847, %.843 + %.848 =l add %.29, 1888 + %.849 =l copy $g_185 + %.850 =l mul 8, 1 + %.851 =l add %.849, %.850 + %.852 =l copy %.851 + storel %.852, %.848 + %.853 =l add %.29, 1896 + storel $g_82, %.853 
+ %.854 =l add %.29, 1904 + %.855 =l extsw 0 + %.856 =l copy %.855 + storel %.856, %.854 + %.857 =l add %.29, 1912 + storel $g_82, %.857 + %.858 =l add %.29, 1920 + %.859 =l extsw 0 + %.860 =l copy %.859 + storel %.860, %.858 + %.861 =l add %.29, 1928 + %.862 =l copy $g_518 + %.863 =l mul 8, 1 + %.864 =l add %.862, %.863 + %.865 =l copy %.864 + storel %.865, %.861 + %.866 =l add %.29, 1936 + storel $g_80, %.866 + %.867 =l add %.29, 1944 + %.868 =l copy $g_265 + %.869 =l mul 8, 1 + %.870 =l add %.868, %.869 + %.871 =l copy %.870 + storel %.871, %.867 + %.872 =l add %.29, 1952 + %.873 =l copy $g_185 + %.874 =l mul 8, 1 + %.875 =l add %.873, %.874 + %.876 =l copy %.875 + storel %.876, %.872 + %.878 =l add %.877, 0 + %.879 =w copy 57862 + storeh %.879, %.878 + %.880 =l add %.877, 2 + %.881 =w copy 0 + storeh %.881, %.880 + %.882 =l add %.877, 4 + %.883 =w copy 43252 + storeh %.883, %.882 + %.884 =l add %.877, 6 + %.885 =w copy 43252 + storeh %.885, %.884 + %.886 =l add %.877, 8 + %.887 =w copy 0 + storeh %.887, %.886 + %.888 =l add %.877, 10 + %.889 =w copy 57862 + storeh %.889, %.888 + %.890 =l add %.877, 12 + %.891 =w copy 0 + storeh %.891, %.890 + %.892 =l add %.877, 14 + %.893 =w copy 9 + storeh %.893, %.892 + %.894 =l add %.877, 16 + %.895 =w copy 1 + storeh %.895, %.894 + %.896 =l add %.877, 18 + %.897 =w copy 1 + storeh %.897, %.896 + %.898 =l add %.877, 20 + %.899 =w copy 9 + storeh %.899, %.898 + %.900 =l add %.877, 22 + %.901 =w copy 41442 + storeh %.901, %.900 + %.902 =l add %.877, 24 + %.903 =w copy 9 + storeh %.903, %.902 + %.904 =l add %.877, 26 + %.905 =w copy 1 + storeh %.905, %.904 + %.906 =l add %.877, 28 + %.907 =w copy 7 + storeh %.907, %.906 + %.908 =l add %.877, 30 + %.909 =w copy 7 + storeh %.909, %.908 + %.910 =l add %.877, 32 + %.911 =w copy 57862 + storeh %.911, %.910 + %.912 =l add %.877, 34 + %.913 =w copy 43252 + storeh %.913, %.912 + %.914 =l add %.877, 36 + %.915 =w copy 57862 + storeh %.915, %.914 + %.916 =l add %.877, 38 + %.917 =w copy 
7 + storeh %.917, %.916 + %.918 =l add %.877, 40 + %.919 =w copy 7 + storeh %.919, %.918 + %.920 =l add %.877, 42 + %.921 =w copy 61416 + storeh %.921, %.920 + %.922 =l add %.877, 44 + %.923 =w copy 1 + storeh %.923, %.922 + %.924 =l add %.877, 46 + %.925 =w copy 51327 + storeh %.925, %.924 + %.926 =l add %.877, 48 + %.927 =w copy 1 + storeh %.927, %.926 + %.928 =l add %.877, 50 + %.929 =w copy 61416 + storeh %.929, %.928 + %.930 =l add %.877, 52 + %.931 =w copy 61416 + storeh %.931, %.930 + %.932 =l add %.877, 54 + %.933 =w copy 1 + storeh %.933, %.932 + %.935 =l add %.934, 0 + storel 4246175373668383303, %.935 + %.937 =l add %.936, 0 + storel $g_296, %.937 + %.939 =l add %.938, 0 + %.940 =w copy 7 + storeb %.940, %.939 + %.941 =l add %.938, 1 + storeb 0, %.941 + %.942 =l add %.938, 2 + storeh 0, %.942 + %.943 =l add %.938, 4 + storew 0, %.943 + %.944 =l add %.938, 8 + %.945 =l copy 12916396440129209738 + storel %.945, %.944 + %.946 =l add %.938, 16 + %.947 =w copy 2845575975 + storew %.947, %.946 + %.948 =l add %.938, 20 + storew 0, %.948 + %.949 =l add %.938, 24 + storel 16685243662073323047, %.949 + %.950 =l add %.938, 32 + %.951 =w copy 2128478778 + storew %.951, %.950 + %.952 =l add %.938, 36 + %.953 =w copy 1831715476 + storew %.953, %.952 + %.954 =l add %.938, 40 + %.955 =w copy 2458647541 + storew %.955, %.954 + %.956 =l add %.938, 44 + %.957 =w copy 1195810902 + storew %.957, %.956 + %.958 =l add %.938, 48 + %.959 =w copy 0 + storew %.959, %.958 + %.960 =l add %.938, 52 + storew 0, %.960 + %.961 =l add %.938, 56 + %.962 =w copy 4 + storeb %.962, %.961 + %.963 =l add %.938, 57 + storeb 0, %.963 + %.964 =l add %.938, 58 + storeh 0, %.964 + %.965 =l add %.938, 60 + storew 0, %.965 + %.966 =l add %.938, 64 + %.967 =l copy 3 + storel %.967, %.966 + %.968 =l add %.938, 72 + %.969 =w copy 3413279085 + storew %.969, %.968 + %.970 =l add %.938, 76 + storew 0, %.970 + %.971 =l add %.938, 80 + storel 12472845116585076645, %.971 + %.972 =l add %.938, 88 + %.973 =w 
copy 1 + storew %.973, %.972 + %.974 =l add %.938, 92 + %.975 =w copy 18446744073709551607 + storew %.975, %.974 + %.976 =l add %.938, 96 + %.977 =l extsw 0 + %.978 =l sub %.977, 7 + %.979 =w copy %.978 + storew %.979, %.976 + %.980 =l add %.938, 100 + %.981 =l extsw 0 + %.982 =l sub %.981, 4 + %.983 =w copy %.982 + storew %.983, %.980 + %.984 =l add %.938, 104 + %.985 =w copy 6 + storew %.985, %.984 + %.986 =l add %.938, 108 + storew 0, %.986 + %.987 =l add %.938, 112 + %.988 =w copy 7 + storeb %.988, %.987 + %.989 =l add %.938, 113 + storeb 0, %.989 + %.990 =l add %.938, 114 + storeh 0, %.990 + %.991 =l add %.938, 116 + storew 0, %.991 + %.992 =l add %.938, 120 + %.993 =l copy 12916396440129209738 + storel %.993, %.992 + %.994 =l add %.938, 128 + %.995 =w copy 2845575975 + storew %.995, %.994 + %.996 =l add %.938, 132 + storew 0, %.996 + %.997 =l add %.938, 136 + storel 16685243662073323047, %.997 + %.998 =l add %.938, 144 + %.999 =w copy 2128478778 + storew %.999, %.998 + %.1000 =l add %.938, 148 + %.1001 =w copy 1831715476 + storew %.1001, %.1000 + %.1002 =l add %.938, 152 + %.1003 =w copy 2458647541 + storew %.1003, %.1002 + %.1004 =l add %.938, 156 + %.1005 =w copy 1195810902 + storew %.1005, %.1004 + %.1006 =l add %.938, 160 + %.1007 =w copy 0 + storew %.1007, %.1006 + %.1008 =l add %.938, 164 + storew 0, %.1008 + %.1009 =l add %.938, 168 + %.1010 =w copy 7 + storeb %.1010, %.1009 + %.1011 =l add %.938, 169 + storeb 0, %.1011 + %.1012 =l add %.938, 170 + storeh 0, %.1012 + %.1013 =l add %.938, 172 + storew 0, %.1013 + %.1014 =l add %.938, 176 + %.1015 =l copy 12916396440129209738 + storel %.1015, %.1014 + %.1016 =l add %.938, 184 + %.1017 =w copy 2845575975 + storew %.1017, %.1016 + %.1018 =l add %.938, 188 + storew 0, %.1018 + %.1019 =l add %.938, 192 + storel 16685243662073323047, %.1019 + %.1020 =l add %.938, 200 + %.1021 =w copy 2128478778 + storew %.1021, %.1020 + %.1022 =l add %.938, 204 + %.1023 =w copy 1831715476 + storew %.1023, %.1022 + %.1024 =l 
add %.938, 208 + %.1025 =w copy 2458647541 + storew %.1025, %.1024 + %.1026 =l add %.938, 212 + %.1027 =w copy 1195810902 + storew %.1027, %.1026 + %.1028 =l add %.938, 216 + %.1029 =w copy 0 + storew %.1029, %.1028 + %.1030 =l add %.938, 220 + storew 0, %.1030 + %.1031 =l add %.938, 224 + %.1032 =w copy 250 + storeb %.1032, %.1031 + %.1033 =l add %.938, 225 + storeb 0, %.1033 + %.1034 =l add %.938, 226 + storeh 0, %.1034 + %.1035 =l add %.938, 228 + storew 0, %.1035 + %.1036 =l add %.938, 232 + %.1037 =l copy 3 + storel %.1037, %.1036 + %.1038 =l add %.938, 240 + %.1039 =w copy 2424977419 + storew %.1039, %.1038 + %.1040 =l add %.938, 244 + storew 0, %.1040 + %.1041 =l add %.938, 248 + %.1042 =l copy 6541172831621759081 + storel %.1042, %.1041 + %.1043 =l add %.938, 256 + %.1044 =w copy 4294967290 + storew %.1044, %.1043 + %.1045 =l add %.938, 260 + %.1046 =w copy 18446744073709551613 + storew %.1046, %.1045 + %.1047 =l add %.938, 264 + %.1048 =w copy 1 + storew %.1048, %.1047 + %.1049 =l add %.938, 268 + %.1050 =w copy 4109237926 + storew %.1050, %.1049 + %.1051 =l add %.938, 272 + %.1052 =l extsw 0 + %.1053 =l sub %.1052, 3 + %.1054 =w copy %.1053 + storew %.1054, %.1051 + %.1055 =l add %.938, 276 + storew 0, %.1055 + %.1056 =l add %.938, 280 + %.1057 =w copy 4 + storeb %.1057, %.1056 + %.1058 =l add %.938, 281 + storeb 0, %.1058 + %.1059 =l add %.938, 282 + storeh 0, %.1059 + %.1060 =l add %.938, 284 + storew 0, %.1060 + %.1061 =l add %.938, 288 + %.1062 =l copy 3 + storel %.1062, %.1061 + %.1063 =l add %.938, 296 + %.1064 =w copy 3413279085 + storew %.1064, %.1063 + %.1065 =l add %.938, 300 + storew 0, %.1065 + %.1066 =l add %.938, 304 + storel 12472845116585076645, %.1066 + %.1067 =l add %.938, 312 + %.1068 =w copy 1 + storew %.1068, %.1067 + %.1069 =l add %.938, 316 + %.1070 =w copy 18446744073709551607 + storew %.1070, %.1069 + %.1071 =l add %.938, 320 + %.1072 =l extsw 0 + %.1073 =l sub %.1072, 7 + %.1074 =w copy %.1073 + storew %.1074, %.1071 + %.1075 =l 
add %.938, 324 + %.1076 =l extsw 0 + %.1077 =l sub %.1076, 4 + %.1078 =w copy %.1077 + storew %.1078, %.1075 + %.1079 =l add %.938, 328 + %.1080 =w copy 6 + storew %.1080, %.1079 + %.1081 =l add %.938, 332 + storew 0, %.1081 + %.1082 =l add %.938, 336 + %.1083 =w copy 4 + storeb %.1083, %.1082 + %.1084 =l add %.938, 337 + storeb 0, %.1084 + %.1085 =l add %.938, 338 + storeh 0, %.1085 + %.1086 =l add %.938, 340 + storew 0, %.1086 + %.1087 =l add %.938, 344 + %.1088 =l copy 3 + storel %.1088, %.1087 + %.1089 =l add %.938, 352 + %.1090 =w copy 3413279085 + storew %.1090, %.1089 + %.1091 =l add %.938, 356 + storew 0, %.1091 + %.1092 =l add %.938, 360 + storel 12472845116585076645, %.1092 + %.1093 =l add %.938, 368 + %.1094 =w copy 1 + storew %.1094, %.1093 + %.1095 =l add %.938, 372 + %.1096 =w copy 18446744073709551607 + storew %.1096, %.1095 + %.1097 =l add %.938, 376 + %.1098 =l extsw 0 + %.1099 =l sub %.1098, 7 + %.1100 =w copy %.1099 + storew %.1100, %.1097 + %.1101 =l add %.938, 380 + %.1102 =l extsw 0 + %.1103 =l sub %.1102, 4 + %.1104 =w copy %.1103 + storew %.1104, %.1101 + %.1105 =l add %.938, 384 + %.1106 =w copy 6 + storew %.1106, %.1105 + %.1107 =l add %.938, 388 + storew 0, %.1107 + %.1108 =l add %.938, 392 + %.1109 =w copy 250 + storeb %.1109, %.1108 + %.1110 =l add %.938, 393 + storeb 0, %.1110 + %.1111 =l add %.938, 394 + storeh 0, %.1111 + %.1112 =l add %.938, 396 + storew 0, %.1112 + %.1113 =l add %.938, 400 + %.1114 =l copy 3 + storel %.1114, %.1113 + %.1115 =l add %.938, 408 + %.1116 =w copy 2424977419 + storew %.1116, %.1115 + %.1117 =l add %.938, 412 + storew 0, %.1117 + %.1118 =l add %.938, 416 + %.1119 =l copy 6541172831621759081 + storel %.1119, %.1118 + %.1120 =l add %.938, 424 + %.1121 =w copy 4294967290 + storew %.1121, %.1120 + %.1122 =l add %.938, 428 + %.1123 =w copy 18446744073709551613 + storew %.1123, %.1122 + %.1124 =l add %.938, 432 + %.1125 =w copy 1 + storew %.1125, %.1124 + %.1126 =l add %.938, 436 + %.1127 =w copy 4109237926 + 
storew %.1127, %.1126 + %.1128 =l add %.938, 440 + %.1129 =l extsw 0 + %.1130 =l sub %.1129, 3 + %.1131 =w copy %.1130 + storew %.1131, %.1128 + %.1132 =l add %.938, 444 + storew 0, %.1132 + %.1133 =l add %.938, 448 + %.1134 =w copy 4 + storeb %.1134, %.1133 + %.1135 =l add %.938, 449 + storeb 0, %.1135 + %.1136 =l add %.938, 450 + storeh 0, %.1136 + %.1137 =l add %.938, 452 + storew 0, %.1137 + %.1138 =l add %.938, 456 + %.1139 =l copy 3 + storel %.1139, %.1138 + %.1140 =l add %.938, 464 + %.1141 =w copy 3413279085 + storew %.1141, %.1140 + %.1142 =l add %.938, 468 + storew 0, %.1142 + %.1143 =l add %.938, 472 + storel 12472845116585076645, %.1143 + %.1144 =l add %.938, 480 + %.1145 =w copy 1 + storew %.1145, %.1144 + %.1146 =l add %.938, 484 + %.1147 =w copy 18446744073709551607 + storew %.1147, %.1146 + %.1148 =l add %.938, 488 + %.1149 =l extsw 0 + %.1150 =l sub %.1149, 7 + %.1151 =w copy %.1150 + storew %.1151, %.1148 + %.1152 =l add %.938, 492 + %.1153 =l extsw 0 + %.1154 =l sub %.1153, 4 + %.1155 =w copy %.1154 + storew %.1155, %.1152 + %.1156 =l add %.938, 496 + %.1157 =w copy 6 + storew %.1157, %.1156 + %.1158 =l add %.938, 500 + storew 0, %.1158 + %.1159 =l add %.938, 504 + %.1160 =w copy 4 + storeb %.1160, %.1159 + %.1161 =l add %.938, 505 + storeb 0, %.1161 + %.1162 =l add %.938, 506 + storeh 0, %.1162 + %.1163 =l add %.938, 508 + storew 0, %.1163 + %.1164 =l add %.938, 512 + %.1165 =l copy 3 + storel %.1165, %.1164 + %.1166 =l add %.938, 520 + %.1167 =w copy 3413279085 + storew %.1167, %.1166 + %.1168 =l add %.938, 524 + storew 0, %.1168 + %.1169 =l add %.938, 528 + storel 12472845116585076645, %.1169 + %.1170 =l add %.938, 536 + %.1171 =w copy 1 + storew %.1171, %.1170 + %.1172 =l add %.938, 540 + %.1173 =w copy 18446744073709551607 + storew %.1173, %.1172 + %.1174 =l add %.938, 544 + %.1175 =l extsw 0 + %.1176 =l sub %.1175, 7 + %.1177 =w copy %.1176 + storew %.1177, %.1174 + %.1178 =l add %.938, 548 + %.1179 =l extsw 0 + %.1180 =l sub %.1179, 4 + 
%.1181 =w copy %.1180 + storew %.1181, %.1178 + %.1182 =l add %.938, 552 + %.1183 =w copy 6 + storew %.1183, %.1182 + %.1184 =l add %.938, 556 + storew 0, %.1184 + %.1185 =l add %.938, 560 + %.1186 =w copy 250 + storeb %.1186, %.1185 + %.1187 =l add %.938, 561 + storeb 0, %.1187 + %.1188 =l add %.938, 562 + storeh 0, %.1188 + %.1189 =l add %.938, 564 + storew 0, %.1189 + %.1190 =l add %.938, 568 + %.1191 =l copy 3 + storel %.1191, %.1190 + %.1192 =l add %.938, 576 + %.1193 =w copy 2424977419 + storew %.1193, %.1192 + %.1194 =l add %.938, 580 + storew 0, %.1194 + %.1195 =l add %.938, 584 + %.1196 =l copy 6541172831621759081 + storel %.1196, %.1195 + %.1197 =l add %.938, 592 + %.1198 =w copy 4294967290 + storew %.1198, %.1197 + %.1199 =l add %.938, 596 + %.1200 =w copy 18446744073709551613 + storew %.1200, %.1199 + %.1201 =l add %.938, 600 + %.1202 =w copy 1 + storew %.1202, %.1201 + %.1203 =l add %.938, 604 + %.1204 =w copy 4109237926 + storew %.1204, %.1203 + %.1205 =l add %.938, 608 + %.1206 =l extsw 0 + %.1207 =l sub %.1206, 3 + %.1208 =w copy %.1207 + storew %.1208, %.1205 + %.1209 =l add %.938, 612 + storew 0, %.1209 + %.1210 =l add %.938, 616 + %.1211 =w copy 4 + storeb %.1211, %.1210 + %.1212 =l add %.938, 617 + storeb 0, %.1212 + %.1213 =l add %.938, 618 + storeh 0, %.1213 + %.1214 =l add %.938, 620 + storew 0, %.1214 + %.1215 =l add %.938, 624 + %.1216 =l copy 3 + storel %.1216, %.1215 + %.1217 =l add %.938, 632 + %.1218 =w copy 3413279085 + storew %.1218, %.1217 + %.1219 =l add %.938, 636 + storew 0, %.1219 + %.1220 =l add %.938, 640 + storel 12472845116585076645, %.1220 + %.1221 =l add %.938, 648 + %.1222 =w copy 1 + storew %.1222, %.1221 + %.1223 =l add %.938, 652 + %.1224 =w copy 18446744073709551607 + storew %.1224, %.1223 + %.1225 =l add %.938, 656 + %.1226 =l extsw 0 + %.1227 =l sub %.1226, 7 + %.1228 =w copy %.1227 + storew %.1228, %.1225 + %.1229 =l add %.938, 660 + %.1230 =l extsw 0 + %.1231 =l sub %.1230, 4 + %.1232 =w copy %.1231 + storew 
%.1232, %.1229 + %.1233 =l add %.938, 664 + %.1234 =w copy 6 + storew %.1234, %.1233 + %.1235 =l add %.938, 668 + storew 0, %.1235 + %.1236 =l add %.938, 672 + %.1237 =w copy 4 + storeb %.1237, %.1236 + %.1238 =l add %.938, 673 + storeb 0, %.1238 + %.1239 =l add %.938, 674 + storeh 0, %.1239 + %.1240 =l add %.938, 676 + storew 0, %.1240 + %.1241 =l add %.938, 680 + %.1242 =l copy 3 + storel %.1242, %.1241 + %.1243 =l add %.938, 688 + %.1244 =w copy 3413279085 + storew %.1244, %.1243 + %.1245 =l add %.938, 692 + storew 0, %.1245 + %.1246 =l add %.938, 696 + storel 12472845116585076645, %.1246 + %.1247 =l add %.938, 704 + %.1248 =w copy 1 + storew %.1248, %.1247 + %.1249 =l add %.938, 708 + %.1250 =w copy 18446744073709551607 + storew %.1250, %.1249 + %.1251 =l add %.938, 712 + %.1252 =l extsw 0 + %.1253 =l sub %.1252, 7 + %.1254 =w copy %.1253 + storew %.1254, %.1251 + %.1255 =l add %.938, 716 + %.1256 =l extsw 0 + %.1257 =l sub %.1256, 4 + %.1258 =w copy %.1257 + storew %.1258, %.1255 + %.1259 =l add %.938, 720 + %.1260 =w copy 6 + storew %.1260, %.1259 + %.1261 =l add %.938, 724 + storew 0, %.1261 + %.1262 =l add %.938, 728 + %.1263 =w copy 250 + storeb %.1263, %.1262 + %.1264 =l add %.938, 729 + storeb 0, %.1264 + %.1265 =l add %.938, 730 + storeh 0, %.1265 + %.1266 =l add %.938, 732 + storew 0, %.1266 + %.1267 =l add %.938, 736 + %.1268 =l copy 3 + storel %.1268, %.1267 + %.1269 =l add %.938, 744 + %.1270 =w copy 2424977419 + storew %.1270, %.1269 + %.1271 =l add %.938, 748 + storew 0, %.1271 + %.1272 =l add %.938, 752 + %.1273 =l copy 6541172831621759081 + storel %.1273, %.1272 + %.1274 =l add %.938, 760 + %.1275 =w copy 4294967290 + storew %.1275, %.1274 + %.1276 =l add %.938, 764 + %.1277 =w copy 18446744073709551613 + storew %.1277, %.1276 + %.1278 =l add %.938, 768 + %.1279 =w copy 1 + storew %.1279, %.1278 + %.1280 =l add %.938, 772 + %.1281 =w copy 4109237926 + storew %.1281, %.1280 + %.1282 =l add %.938, 776 + %.1283 =l extsw 0 + %.1284 =l sub %.1283, 3 
+ %.1285 =w copy %.1284 + storew %.1285, %.1282 + %.1286 =l add %.938, 780 + storew 0, %.1286 + %.1287 =l add %.938, 784 + %.1288 =w copy 4 + storeb %.1288, %.1287 + %.1289 =l add %.938, 785 + storeb 0, %.1289 + %.1290 =l add %.938, 786 + storeh 0, %.1290 + %.1291 =l add %.938, 788 + storew 0, %.1291 + %.1292 =l add %.938, 792 + %.1293 =l copy 3 + storel %.1293, %.1292 + %.1294 =l add %.938, 800 + %.1295 =w copy 3413279085 + storew %.1295, %.1294 + %.1296 =l add %.938, 804 + storew 0, %.1296 + %.1297 =l add %.938, 808 + storel 12472845116585076645, %.1297 + %.1298 =l add %.938, 816 + %.1299 =w copy 1 + storew %.1299, %.1298 + %.1300 =l add %.938, 820 + %.1301 =w copy 18446744073709551607 + storew %.1301, %.1300 + %.1302 =l add %.938, 824 + %.1303 =l extsw 0 + %.1304 =l sub %.1303, 7 + %.1305 =w copy %.1304 + storew %.1305, %.1302 + %.1306 =l add %.938, 828 + %.1307 =l extsw 0 + %.1308 =l sub %.1307, 4 + %.1309 =w copy %.1308 + storew %.1309, %.1306 + %.1310 =l add %.938, 832 + %.1311 =w copy 6 + storew %.1311, %.1310 + %.1312 =l add %.938, 836 + storew 0, %.1312 + %.1313 =l add %.938, 840 + %.1314 =w copy 4 + storeb %.1314, %.1313 + %.1315 =l add %.938, 841 + storeb 0, %.1315 + %.1316 =l add %.938, 842 + storeh 0, %.1316 + %.1317 =l add %.938, 844 + storew 0, %.1317 + %.1318 =l add %.938, 848 + %.1319 =l copy 3 + storel %.1319, %.1318 + %.1320 =l add %.938, 856 + %.1321 =w copy 3413279085 + storew %.1321, %.1320 + %.1322 =l add %.938, 860 + storew 0, %.1322 + %.1323 =l add %.938, 864 + storel 12472845116585076645, %.1323 + %.1324 =l add %.938, 872 + %.1325 =w copy 1 + storew %.1325, %.1324 + %.1326 =l add %.938, 876 + %.1327 =w copy 18446744073709551607 + storew %.1327, %.1326 + %.1328 =l add %.938, 880 + %.1329 =l extsw 0 + %.1330 =l sub %.1329, 7 + %.1331 =w copy %.1330 + storew %.1331, %.1328 + %.1332 =l add %.938, 884 + %.1333 =l extsw 0 + %.1334 =l sub %.1333, 4 + %.1335 =w copy %.1334 + storew %.1335, %.1332 + %.1336 =l add %.938, 888 + %.1337 =w copy 6 + 
storew %.1337, %.1336 + %.1338 =l add %.938, 892 + storew 0, %.1338 + %.1339 =l add %.938, 896 + %.1340 =w copy 250 + storeb %.1340, %.1339 + %.1341 =l add %.938, 897 + storeb 0, %.1341 + %.1342 =l add %.938, 898 + storeh 0, %.1342 + %.1343 =l add %.938, 900 + storew 0, %.1343 + %.1344 =l add %.938, 904 + %.1345 =l copy 3 + storel %.1345, %.1344 + %.1346 =l add %.938, 912 + %.1347 =w copy 2424977419 + storew %.1347, %.1346 + %.1348 =l add %.938, 916 + storew 0, %.1348 + %.1349 =l add %.938, 920 + %.1350 =l copy 6541172831621759081 + storel %.1350, %.1349 + %.1351 =l add %.938, 928 + %.1352 =w copy 4294967290 + storew %.1352, %.1351 + %.1353 =l add %.938, 932 + %.1354 =w copy 18446744073709551613 + storew %.1354, %.1353 + %.1355 =l add %.938, 936 + %.1356 =w copy 1 + storew %.1356, %.1355 + %.1357 =l add %.938, 940 + %.1358 =w copy 4109237926 + storew %.1358, %.1357 + %.1359 =l add %.938, 944 + %.1360 =l extsw 0 + %.1361 =l sub %.1360, 3 + %.1362 =w copy %.1361 + storew %.1362, %.1359 + %.1363 =l add %.938, 948 + storew 0, %.1363 + %.1364 =l add %.938, 952 + %.1365 =w copy 4 + storeb %.1365, %.1364 + %.1366 =l add %.938, 953 + storeb 0, %.1366 + %.1367 =l add %.938, 954 + storeh 0, %.1367 + %.1368 =l add %.938, 956 + storew 0, %.1368 + %.1369 =l add %.938, 960 + %.1370 =l copy 3 + storel %.1370, %.1369 + %.1371 =l add %.938, 968 + %.1372 =w copy 3413279085 + storew %.1372, %.1371 + %.1373 =l add %.938, 972 + storew 0, %.1373 + %.1374 =l add %.938, 976 + storel 12472845116585076645, %.1374 + %.1375 =l add %.938, 984 + %.1376 =w copy 1 + storew %.1376, %.1375 + %.1377 =l add %.938, 988 + %.1378 =w copy 18446744073709551607 + storew %.1378, %.1377 + %.1379 =l add %.938, 992 + %.1380 =l extsw 0 + %.1381 =l sub %.1380, 7 + %.1382 =w copy %.1381 + storew %.1382, %.1379 + %.1383 =l add %.938, 996 + %.1384 =l extsw 0 + %.1385 =l sub %.1384, 4 + %.1386 =w copy %.1385 + storew %.1386, %.1383 + %.1387 =l add %.938, 1000 + %.1388 =w copy 6 + storew %.1388, %.1387 + %.1389 =l 
add %.938, 1004 + storew 0, %.1389 + %.1390 =l add %.938, 1008 + %.1391 =w copy 4 + storeb %.1391, %.1390 + %.1392 =l add %.938, 1009 + storeb 0, %.1392 + %.1393 =l add %.938, 1010 + storeh 0, %.1393 + %.1394 =l add %.938, 1012 + storew 0, %.1394 + %.1395 =l add %.938, 1016 + %.1396 =l copy 3 + storel %.1396, %.1395 + %.1397 =l add %.938, 1024 + %.1398 =w copy 3413279085 + storew %.1398, %.1397 + %.1399 =l add %.938, 1028 + storew 0, %.1399 + %.1400 =l add %.938, 1032 + storel 12472845116585076645, %.1400 + %.1401 =l add %.938, 1040 + %.1402 =w copy 1 + storew %.1402, %.1401 + %.1403 =l add %.938, 1044 + %.1404 =w copy 18446744073709551607 + storew %.1404, %.1403 + %.1405 =l add %.938, 1048 + %.1406 =l extsw 0 + %.1407 =l sub %.1406, 7 + %.1408 =w copy %.1407 + storew %.1408, %.1405 + %.1409 =l add %.938, 1052 + %.1410 =l extsw 0 + %.1411 =l sub %.1410, 4 + %.1412 =w copy %.1411 + storew %.1412, %.1409 + %.1413 =l add %.938, 1056 + %.1414 =w copy 6 + storew %.1414, %.1413 + %.1415 =l add %.938, 1060 + storew 0, %.1415 + %.1416 =l add %.938, 1064 + %.1417 =w copy 250 + storeb %.1417, %.1416 + %.1418 =l add %.938, 1065 + storeb 0, %.1418 + %.1419 =l add %.938, 1066 + storeh 0, %.1419 + %.1420 =l add %.938, 1068 + storew 0, %.1420 + %.1421 =l add %.938, 1072 + %.1422 =l copy 3 + storel %.1422, %.1421 + %.1423 =l add %.938, 1080 + %.1424 =w copy 2424977419 + storew %.1424, %.1423 + %.1425 =l add %.938, 1084 + storew 0, %.1425 + %.1426 =l add %.938, 1088 + %.1427 =l copy 6541172831621759081 + storel %.1427, %.1426 + %.1428 =l add %.938, 1096 + %.1429 =w copy 4294967290 + storew %.1429, %.1428 + %.1430 =l add %.938, 1100 + %.1431 =w copy 18446744073709551613 + storew %.1431, %.1430 + %.1432 =l add %.938, 1104 + %.1433 =w copy 1 + storew %.1433, %.1432 + %.1434 =l add %.938, 1108 + %.1435 =w copy 4109237926 + storew %.1435, %.1434 + %.1436 =l add %.938, 1112 + %.1437 =l extsw 0 + %.1438 =l sub %.1437, 3 + %.1439 =w copy %.1438 + storew %.1439, %.1436 + %.1440 =l add 
%.938, 1116 + storew 0, %.1440 + %.1441 =l add %.938, 1120 + %.1442 =w copy 4 + storeb %.1442, %.1441 + %.1443 =l add %.938, 1121 + storeb 0, %.1443 + %.1444 =l add %.938, 1122 + storeh 0, %.1444 + %.1445 =l add %.938, 1124 + storew 0, %.1445 + %.1446 =l add %.938, 1128 + %.1447 =l copy 3 + storel %.1447, %.1446 + %.1448 =l add %.938, 1136 + %.1449 =w copy 3413279085 + storew %.1449, %.1448 + %.1450 =l add %.938, 1140 + storew 0, %.1450 + %.1451 =l add %.938, 1144 + storel 12472845116585076645, %.1451 + %.1452 =l add %.938, 1152 + %.1453 =w copy 1 + storew %.1453, %.1452 + %.1454 =l add %.938, 1156 + %.1455 =w copy 18446744073709551607 + storew %.1455, %.1454 + %.1456 =l add %.938, 1160 + %.1457 =l extsw 0 + %.1458 =l sub %.1457, 7 + %.1459 =w copy %.1458 + storew %.1459, %.1456 + %.1460 =l add %.938, 1164 + %.1461 =l extsw 0 + %.1462 =l sub %.1461, 4 + %.1463 =w copy %.1462 + storew %.1463, %.1460 + %.1464 =l add %.938, 1168 + %.1465 =w copy 6 + storew %.1465, %.1464 + %.1466 =l add %.938, 1172 + storew 0, %.1466 + %.1467 =l add %.938, 1176 + %.1468 =w copy 4 + storeb %.1468, %.1467 + %.1469 =l add %.938, 1177 + storeb 0, %.1469 + %.1470 =l add %.938, 1178 + storeh 0, %.1470 + %.1471 =l add %.938, 1180 + storew 0, %.1471 + %.1472 =l add %.938, 1184 + %.1473 =l copy 3 + storel %.1473, %.1472 + %.1474 =l add %.938, 1192 + %.1475 =w copy 3413279085 + storew %.1475, %.1474 + %.1476 =l add %.938, 1196 + storew 0, %.1476 + %.1477 =l add %.938, 1200 + storel 12472845116585076645, %.1477 + %.1478 =l add %.938, 1208 + %.1479 =w copy 1 + storew %.1479, %.1478 + %.1480 =l add %.938, 1212 + %.1481 =w copy 18446744073709551607 + storew %.1481, %.1480 + %.1482 =l add %.938, 1216 + %.1483 =l extsw 0 + %.1484 =l sub %.1483, 7 + %.1485 =w copy %.1484 + storew %.1485, %.1482 + %.1486 =l add %.938, 1220 + %.1487 =l extsw 0 + %.1488 =l sub %.1487, 4 + %.1489 =w copy %.1488 + storew %.1489, %.1486 + %.1490 =l add %.938, 1224 + %.1491 =w copy 6 + storew %.1491, %.1490 + %.1492 =l add 
%.938, 1228 + storew 0, %.1492 + %.1493 =l add %.938, 1232 + %.1494 =w copy 250 + storeb %.1494, %.1493 + %.1495 =l add %.938, 1233 + storeb 0, %.1495 + %.1496 =l add %.938, 1234 + storeh 0, %.1496 + %.1497 =l add %.938, 1236 + storew 0, %.1497 + %.1498 =l add %.938, 1240 + %.1499 =l copy 3 + storel %.1499, %.1498 + %.1500 =l add %.938, 1248 + %.1501 =w copy 2424977419 + storew %.1501, %.1500 + %.1502 =l add %.938, 1252 + storew 0, %.1502 + %.1503 =l add %.938, 1256 + %.1504 =l copy 6541172831621759081 + storel %.1504, %.1503 + %.1505 =l add %.938, 1264 + %.1506 =w copy 4294967290 + storew %.1506, %.1505 + %.1507 =l add %.938, 1268 + %.1508 =w copy 18446744073709551613 + storew %.1508, %.1507 + %.1509 =l add %.938, 1272 + %.1510 =w copy 1 + storew %.1510, %.1509 + %.1511 =l add %.938, 1276 + %.1512 =w copy 4109237926 + storew %.1512, %.1511 + %.1513 =l add %.938, 1280 + %.1514 =l extsw 0 + %.1515 =l sub %.1514, 3 + %.1516 =w copy %.1515 + storew %.1516, %.1513 + %.1517 =l add %.938, 1284 + storew 0, %.1517 + %.1518 =l add %.938, 1288 + %.1519 =w copy 4 + storeb %.1519, %.1518 + %.1520 =l add %.938, 1289 + storeb 0, %.1520 + %.1521 =l add %.938, 1290 + storeh 0, %.1521 + %.1522 =l add %.938, 1292 + storew 0, %.1522 + %.1523 =l add %.938, 1296 + %.1524 =l copy 3 + storel %.1524, %.1523 + %.1525 =l add %.938, 1304 + %.1526 =w copy 3413279085 + storew %.1526, %.1525 + %.1527 =l add %.938, 1308 + storew 0, %.1527 + %.1528 =l add %.938, 1312 + storel 12472845116585076645, %.1528 + %.1529 =l add %.938, 1320 + %.1530 =w copy 1 + storew %.1530, %.1529 + %.1531 =l add %.938, 1324 + %.1532 =w copy 18446744073709551607 + storew %.1532, %.1531 + %.1533 =l add %.938, 1328 + %.1534 =l extsw 0 + %.1535 =l sub %.1534, 7 + %.1536 =w copy %.1535 + storew %.1536, %.1533 + %.1537 =l add %.938, 1332 + %.1538 =l extsw 0 + %.1539 =l sub %.1538, 4 + %.1540 =w copy %.1539 + storew %.1540, %.1537 + %.1541 =l add %.938, 1336 + %.1542 =w copy 6 + storew %.1542, %.1541 + %.1543 =l add %.938, 
1340 + storew 0, %.1543 + %.1544 =l add %.938, 1344 + %.1545 =w copy 4 + storeb %.1545, %.1544 + %.1546 =l add %.938, 1345 + storeb 0, %.1546 + %.1547 =l add %.938, 1346 + storeh 0, %.1547 + %.1548 =l add %.938, 1348 + storew 0, %.1548 + %.1549 =l add %.938, 1352 + %.1550 =l copy 3 + storel %.1550, %.1549 + %.1551 =l add %.938, 1360 + %.1552 =w copy 3413279085 + storew %.1552, %.1551 + %.1553 =l add %.938, 1364 + storew 0, %.1553 + %.1554 =l add %.938, 1368 + storel 12472845116585076645, %.1554 + %.1555 =l add %.938, 1376 + %.1556 =w copy 1 + storew %.1556, %.1555 + %.1557 =l add %.938, 1380 + %.1558 =w copy 18446744073709551607 + storew %.1558, %.1557 + %.1559 =l add %.938, 1384 + %.1560 =l extsw 0 + %.1561 =l sub %.1560, 7 + %.1562 =w copy %.1561 + storew %.1562, %.1559 + %.1563 =l add %.938, 1388 + %.1564 =l extsw 0 + %.1565 =l sub %.1564, 4 + %.1566 =w copy %.1565 + storew %.1566, %.1563 + %.1567 =l add %.938, 1392 + %.1568 =w copy 6 + storew %.1568, %.1567 + %.1569 =l add %.938, 1396 + storew 0, %.1569 + %.1570 =l add %.938, 1400 + %.1571 =w copy 250 + storeb %.1571, %.1570 + %.1572 =l add %.938, 1401 + storeb 0, %.1572 + %.1573 =l add %.938, 1402 + storeh 0, %.1573 + %.1574 =l add %.938, 1404 + storew 0, %.1574 + %.1575 =l add %.938, 1408 + %.1576 =l copy 3 + storel %.1576, %.1575 + %.1577 =l add %.938, 1416 + %.1578 =w copy 2424977419 + storew %.1578, %.1577 + %.1579 =l add %.938, 1420 + storew 0, %.1579 + %.1580 =l add %.938, 1424 + %.1581 =l copy 6541172831621759081 + storel %.1581, %.1580 + %.1582 =l add %.938, 1432 + %.1583 =w copy 4294967290 + storew %.1583, %.1582 + %.1584 =l add %.938, 1436 + %.1585 =w copy 18446744073709551613 + storew %.1585, %.1584 + %.1586 =l add %.938, 1440 + %.1587 =w copy 1 + storew %.1587, %.1586 + %.1588 =l add %.938, 1444 + %.1589 =w copy 4109237926 + storew %.1589, %.1588 + %.1590 =l add %.938, 1448 + %.1591 =l extsw 0 + %.1592 =l sub %.1591, 3 + %.1593 =w copy %.1592 + storew %.1593, %.1590 + %.1594 =l add %.938, 1452 + 
storew 0, %.1594 + %.1595 =l add %.938, 1456 + %.1596 =w copy 4 + storeb %.1596, %.1595 + %.1597 =l add %.938, 1457 + storeb 0, %.1597 + %.1598 =l add %.938, 1458 + storeh 0, %.1598 + %.1599 =l add %.938, 1460 + storew 0, %.1599 + %.1600 =l add %.938, 1464 + %.1601 =l copy 3 + storel %.1601, %.1600 + %.1602 =l add %.938, 1472 + %.1603 =w copy 3413279085 + storew %.1603, %.1602 + %.1604 =l add %.938, 1476 + storew 0, %.1604 + %.1605 =l add %.938, 1480 + storel 12472845116585076645, %.1605 + %.1606 =l add %.938, 1488 + %.1607 =w copy 1 + storew %.1607, %.1606 + %.1608 =l add %.938, 1492 + %.1609 =w copy 18446744073709551607 + storew %.1609, %.1608 + %.1610 =l add %.938, 1496 + %.1611 =l extsw 0 + %.1612 =l sub %.1611, 7 + %.1613 =w copy %.1612 + storew %.1613, %.1610 + %.1614 =l add %.938, 1500 + %.1615 =l extsw 0 + %.1616 =l sub %.1615, 4 + %.1617 =w copy %.1616 + storew %.1617, %.1614 + %.1618 =l add %.938, 1504 + %.1619 =w copy 6 + storew %.1619, %.1618 + %.1620 =l add %.938, 1508 + storew 0, %.1620 + %.1621 =l add %.938, 1512 + %.1622 =w copy 4 + storeb %.1622, %.1621 + %.1623 =l add %.938, 1513 + storeb 0, %.1623 + %.1624 =l add %.938, 1514 + storeh 0, %.1624 + %.1625 =l add %.938, 1516 + storew 0, %.1625 + %.1626 =l add %.938, 1520 + %.1627 =l copy 3 + storel %.1627, %.1626 + %.1628 =l add %.938, 1528 + %.1629 =w copy 3413279085 + storew %.1629, %.1628 + %.1630 =l add %.938, 1532 + storew 0, %.1630 + %.1631 =l add %.938, 1536 + storel 12472845116585076645, %.1631 + %.1632 =l add %.938, 1544 + %.1633 =w copy 1 + storew %.1633, %.1632 + %.1634 =l add %.938, 1548 + %.1635 =w copy 18446744073709551607 + storew %.1635, %.1634 + %.1636 =l add %.938, 1552 + %.1637 =l extsw 0 + %.1638 =l sub %.1637, 7 + %.1639 =w copy %.1638 + storew %.1639, %.1636 + %.1640 =l add %.938, 1556 + %.1641 =l extsw 0 + %.1642 =l sub %.1641, 4 + %.1643 =w copy %.1642 + storew %.1643, %.1640 + %.1644 =l add %.938, 1560 + %.1645 =w copy 6 + storew %.1645, %.1644 + %.1646 =l add %.938, 1564 + 
storew 0, %.1646 + %.1648 =l add %.1647, 0 + %.1649 =w copy 93 + storeb %.1649, %.1648 + %.1650 =l add %.1647, 1 + storeb 0, %.1650 + %.1651 =l add %.1647, 2 + storeh 0, %.1651 + %.1652 =l add %.1647, 4 + storew 0, %.1652 + %.1653 =l add %.1647, 8 + storel 1149193768119386005, %.1653 + %.1654 =l add %.1647, 16 + %.1655 =w copy 3821279724 + storew %.1655, %.1654 + %.1656 =l add %.1647, 20 + storew 0, %.1656 + %.1657 =l add %.1647, 24 + storel 13837231179985012781, %.1657 + %.1658 =l add %.1647, 32 + %.1659 =w copy 8 + storew %.1659, %.1658 + %.1660 =l add %.1647, 36 + %.1661 =w copy 2352557560 + storew %.1661, %.1660 + %.1662 =l add %.1647, 40 + %.1663 =w copy 3321767348 + storew %.1663, %.1662 + %.1664 =l add %.1647, 44 + %.1665 =w copy 1113148436 + storew %.1665, %.1664 + %.1666 =l add %.1647, 48 + %.1667 =w copy 5 + storew %.1667, %.1666 + %.1668 =l add %.1647, 52 + storew 0, %.1668 + %.1670 =l add %.1669, 0 + %.1671 =l copy $g_265 + %.1672 =l mul 40, 1 + %.1673 =l add %.1671, %.1672 + %.1674 =l copy %.1673 + storel %.1674, %.1670 + %.1676 =l add %.1675, 0 + storel $g_88, %.1676 + %.1678 =l add %.1677, 0 + storel $g_634, %.1678 + %.1680 =l add %.1679, 0 + %.1681 =l extsw 4 + %.1682 =l mul %.1681, 1 + %.1683 =l add $g_132, %.1682 + storel %.1683, %.1680 + %.1685 =l add %.1684, 0 + %.1686 =l extsw 0 + %.1687 =l copy %.1686 + storel %.1687, %.1685 + %.1690 =l add %.1689, 0 + %.1691 =w copy 4 + storeh %.1691, %.1690 + storew 0, %.1692 +@for_cond.991 + %.1695 =w loadsw %.1692 + %.1696 =w csltw %.1695, 1 + jnz %.1696, @for_body.992, @for_join.994 +@for_body.992 + %.1697 =w loadsw %.1692 + %.1698 =l extsw %.1697 + %.1699 =l mul %.1698, 8 + %.1700 =l add %.14, %.1699 + storel $g_24, %.1700 +@for_cont.993 + %.1701 =w loadsw %.1692 + %.1702 =w add %.1701, 1 + storew %.1702, %.1692 + jmp @for_cond.991 +@for_join.994 + storew 0, %.1692 +@for_cond.995 + %.1703 =w loadsw %.1692 + %.1704 =w csltw %.1703, 2 + jnz %.1704, @for_body.996, @for_join.998 +@for_body.996 + %.1705 =w 
copy 30 + %.1706 =w loadsw %.1692 + %.1707 =l extsw %.1706 + %.1708 =l mul %.1707, 1 + %.1709 =l add %.1688, %.1708 + storeb %.1705, %.1709 +@for_cont.997 + %.1710 =w loadsw %.1692 + %.1711 =w add %.1710, 1 + storew %.1711, %.1692 + jmp @for_cond.995 +@for_join.998 + %.1712 =l loadl %.1669 + ret %.1712 +} +function w $func_16(l %.1, l %.3) { +@start.999 + %.2 =l alloc8 8 + storel %.1, %.2 + %.4 =l alloc8 8 + storel %.3, %.4 + %.5 =l alloc4 2 +@body.1000 + %.6 =l add %.5, 0 + %.7 =w copy 1 + storeh %.7, %.6 + %.8 =w loadsh %.5 + %.9 =w extsh %.8 + ret %.9 +} +type :S1.1 = { w, w, h, w, w, } +function l $func_19(w %.1, l %.3, :S1.1 %.5) { +@start.1001 + %.2 =l alloc4 4 + storew %.1, %.2 + %.4 =l alloc8 8 + storel %.3, %.4 + %.6 =l alloc8 8 + %.8 =l alloc8 8 + %.27 =l alloc8 8 +@body.1002 + %.7 =l add %.6, 0 + storel $g_80, %.7 + %.9 =l add %.8, 0 + %.10 =l copy $g_518 + %.11 =l mul 16, 1 + %.12 =l add %.10, %.11 + %.13 =l copy %.12 + storel %.13, %.9 + %.14 =w sub 0, 11 + %.15 =w copy %.14 + %.16 =l copy $g_794 + %.17 =l mul 16, 1 + %.18 =l add %.16, %.17 + %.19 =l copy %.18 + storew %.15, %.19 +@for_cond.1003 + %.20 =l copy $g_794 + %.21 =l mul 16, 1 + %.22 =l add %.20, %.21 + %.23 =l copy %.22 + %.24 =w loaduw %.23 + %.25 =w copy 12 + %.26 =w cultw %.24, %.25 + jnz %.26, @for_body.1004, @for_join.1006 +@for_body.1004 + %.28 =l add %.27, 0 + %.29 =l copy $g_518 + %.30 =l mul 8, 1 + %.31 =l add %.29, %.30 + %.32 =l copy %.31 + storel %.32, %.28 + %.33 =l loadl $g_38 + %.34 =l loadl %.33 + %.35 =w loadsw %.34 + %.36 =l loadl %.27 + %.37 =l loadl %.6 + %.38 =w ceql %.36, %.37 + %.39 =w and %.35, %.38 + storew %.39, %.34 +@for_cont.1005 + %.40 =l copy $g_794 + %.41 =l mul 16, 1 + %.42 =l add %.40, %.41 + %.43 =l copy %.42 + %.44 =w loaduw %.43 + %.45 =l extuw %.44 + %.46 =l extsw 4 + %.47 =l call $safe_add_func_uint64_t_u_u(l %.45, l %.46) + %.48 =w copy %.47 + %.49 =l copy $g_794 + %.50 =l mul 16, 1 + %.51 =l add %.49, %.50 + %.52 =l copy %.51 + storew %.48, %.52 + jmp 
@for_cond.1003 +@for_join.1006 + %.53 =l loadl %.8 + %.54 =l loadl $g_38 + storel %.53, %.54 + %.55 =l loadl $g_88 + %.56 =l loadl %.55 + %.57 =l loadl %.56 + ret %.57 +} +type :S0.2 = { b, l, w, l, w, w, w, w, w, } +function w $func_25(w %.1, w %.3, l %.5, :S0.2 %.7) { +@start.1007 + %.2 =l alloc4 1 + storeb %.1, %.2 + %.4 =l alloc4 2 + storeh %.3, %.4 + %.6 =l alloc8 8 + storel %.5, %.6 + %.8 =l alloc8 64 + %.9 =l alloc8 8 + %.14 =l alloc4 40 + %.37 =l alloc8 56 + %.38 =l alloc4 4 +@body.1008 + %.10 =l add %.9, 0 + %.11 =l extsw 1 + %.12 =l mul %.11, 8 + %.13 =l add %.8, %.12 + storel %.13, %.10 + %.15 =l add %.14, 0 + %.16 =w copy 1614650852 + storew %.16, %.15 + %.17 =l add %.14, 4 + %.18 =w copy 18446744073709551609 + storew %.18, %.17 + %.19 =l add %.14, 8 + %.20 =w copy 53864 + storeh %.20, %.19 + %.21 =l add %.14, 10 + storeh 0, %.21 + %.22 =l add %.14, 12 + %.23 =w copy 3514176187 + storew %.23, %.22 + %.24 =l add %.14, 16 + %.25 =w copy 3295455848 + storew %.25, %.24 + %.26 =l add %.14, 20 + %.27 =w copy 1614650852 + storew %.27, %.26 + %.28 =l add %.14, 24 + %.29 =w copy 18446744073709551609 + storew %.29, %.28 + %.30 =l add %.14, 28 + %.31 =w copy 53864 + storeh %.31, %.30 + %.32 =l add %.14, 30 + storeh 0, %.32 + %.33 =l add %.14, 32 + %.34 =w copy 3514176187 + storew %.34, %.33 + %.35 =l add %.14, 36 + %.36 =w copy 3295455848 + storew %.36, %.35 + storew 0, %.38 +@for_cond.1009 + %.39 =w loadsw %.38 + %.40 =w csltw %.39, 8 + jnz %.40, @for_body.1010, @for_join.1012 +@for_body.1010 + %.41 =l copy $g_794 + %.42 =l mul 12, 1 + %.43 =l add %.41, %.42 + %.44 =l copy %.43 + %.45 =w loadsw %.38 + %.46 =l extsw %.45 + %.47 =l mul %.46, 8 + %.48 =l add %.8, %.47 + storel %.44, %.48 +@for_cont.1011 + %.49 =w loadsw %.38 + %.50 =w add %.49, 1 + storew %.50, %.38 + jmp @for_cond.1009 +@for_join.1012 + storew 0, %.38 +@for_cond.1013 + %.51 =w loadsw %.38 + %.52 =w csltw %.51, 7 + jnz %.52, @for_body.1014, @for_join.1016 +@for_body.1014 + %.53 =w loadsw %.38 + %.54 
=l extsw %.53 + %.55 =l mul %.54, 8 + %.56 =l add %.37, %.55 + storel $g_201, %.56 +@for_cont.1015 + %.57 =w loadsw %.38 + %.58 =w add %.57, 1 + storew %.58, %.38 + jmp @for_cond.1013 +@for_join.1016 + %.59 =l loadl %.9 + %.60 =l extsw 1 + %.61 =l mul %.60, 8 + %.62 =l add %.8, %.61 + %.63 =w ceql %.59, %.62 + %.64 =l extsw %.63 + %.65 =l and %.64, 1 + %.66 =l copy %.65 + %.67 =l copy %.7 + %.68 =l mul 0, 1 + %.69 =l add %.67, %.68 + %.70 =l copy %.69 + %.71 =w loadub %.70 + %.72 =l extsw 0 + %.73 =l mul %.72, 20 + %.74 =l add %.14, %.73 + %.75 =l extsw 0 + %.76 =l mul %.75, 20 + %.77 =l add %.14, %.76 + %.78 =l copy %.77 + %.79 =l mul 8, 1 + %.80 =l add %.78, %.79 + %.81 =l copy %.80 + %.82 =w loadsh %.81 + %.83 =l loadl $g_201 + %.84 =l extsw 3 + %.85 =l mul %.84, 8 + %.86 =l add %.37, %.85 + %.87 =l loadl %.86 + %.88 =w cnel $g_201, %.87 + %.89 =w cnew %.88, 0 + jnz %.89, @logic_right.1021, @logic_join.1022 +@logic_right.1021 + %.90 =l loadl %.6 + %.91 =w loadsw %.90 + %.92 =w cnew %.91, 0 +@logic_join.1022 + %.93 =w phi @for_join.1016 %.89, @logic_right.1021 %.92 + %.94 =w cnew %.93, 0 + jnz %.94, @logic_join.1020, @logic_right.1019 +@logic_right.1019 + %.95 =l extsw 0 + %.96 =l mul %.95, 20 + %.97 =l add %.14, %.96 + %.98 =l copy %.97 + %.99 =l mul 12, 1 + %.100 =l add %.98, %.99 + %.101 =l copy %.100 + %.102 =w loadsw %.101 + %.103 =w cnew %.102, 0 +@logic_join.1020 + %.104 =w phi @logic_join.1022 %.94, @logic_right.1019 %.103 + %.105 =w copy %.104 + %.106 =l extsw 0 + %.107 =l mul %.106, 20 + %.108 =l add %.14, %.107 + %.109 =l copy %.108 + %.110 =l mul 4, 1 + %.111 =l add %.109, %.110 + %.112 =l copy %.111 + %.113 =w loaduw %.112 + %.114 =w and %.105, %.113 + %.115 =w copy 0 + %.116 =w ceqw %.114, %.115 + %.117 =w cnew %.116, 0 + jnz %.117, @logic_join.1018, @logic_right.1017 +@logic_right.1017 + %.118 =w cnel 12400815938564546249, 0 +@logic_join.1018 + %.119 =w phi @logic_join.1020 %.117, @logic_right.1017 %.118 + %.120 =l extsw %.119 + %.121 =l or %.120, 
4294967288 + %.122 =l extsw 0 + %.123 =l mul %.122, 20 + %.124 =l add %.14, %.123 + %.125 =l copy %.124 + %.126 =l mul 0, 1 + %.127 =l add %.125, %.126 + %.128 =l copy %.127 + %.129 =w loadsw %.128 + %.130 =l extsw %.129 + %.131 =w cugtl %.121, %.130 + %.132 =w loadsh %.4 + %.133 =w extsh %.132 + %.134 =w cnew %.131, %.133 + %.135 =l extsw 0 + %.136 =l extsw 2 + %.137 =l mul %.136, 8 + %.138 =l add %.8, %.137 + %.139 =l loadl %.138 + %.140 =w ceql %.135, %.139 + %.141 =l extsw %.140 + %.142 =l copy %.7 + %.143 =l mul 16, 1 + %.144 =l add %.142, %.143 + %.145 =l copy %.144 + %.146 =w loadsw %.145 + %.147 =l extsw %.146 + %.148 =l call $safe_sub_func_int64_t_s_s(l %.141, l %.147) + %.149 =l copy %.148 + %.150 =w cugel %.66, %.149 + %.151 =w loadsh %.4 + %.152 =w extsh %.151 + %.153 =l extsw 0 + %.154 =l mul %.153, 20 + %.155 =l add %.14, %.154 + %.156 =l copy %.155 + %.157 =l mul 4, 1 + %.158 =l add %.156, %.157 + %.159 =l copy %.158 + %.160 =w loaduw %.159 + %.161 =w or %.152, %.160 + %.162 =w copy %.161 + %.163 =l loadl %.6 + storew %.162, %.163 + %.164 =l extsw 0 + %.165 =l mul %.164, 20 + %.166 =l add %.14, %.165 + %.167 =l copy %.166 + %.168 =l mul 4, 1 + %.169 =l add %.167, %.168 + %.170 =l copy %.169 + %.171 =w loaduw %.170 + %.172 =w copy %.171 + ret %.172 +} +function w $func_30(w %.1, l %.3) { +@start.1023 + %.2 =l alloc4 4 + storew %.1, %.2 + %.4 =l alloc8 8 + storel %.3, %.4 + %.5 =l alloc4 1 + %.8 =l alloc8 8 + %.10 =l alloc8 56 + %.35 =l alloc8 8 + %.37 =l alloc8 8 + %.39 =l alloc8 336 + %.84 =l alloc4 4 + %.87 =l alloc8 8 + %.89 =l alloc4 20 + %.101 =l alloc4 4 + %.104 =l alloc4 4 + %.109 =l alloc8 8 + %.110 =l alloc4 2 + %.113 =l alloc4 4 + %.118 =l alloc4 4 + %.123 =l alloc4 4 + %.126 =l alloc4 4 + %.129 =l alloc4 4 + %.132 =l alloc4 24 + %.133 =l alloc4 20 + %.147 =l alloc8 8 + %.151 =l alloc8 8 + %.155 =l alloc8 8 + %.161 =l alloc4 2 + %.164 =l alloc4 12 + %.165 =l alloc4 4 + %.170 =l alloc4 2 + %.173 =l alloc4 4 + %.174 =l alloc4 4 + %.175 =l 
alloc4 4 + %.206 =l alloc4 2 + %.209 =l alloc8 72 + %.219 =l alloc4 4 + %.224 =l alloc4 4 + %.227 =l alloc4 4 + %.230 =l alloc4 1 + %.233 =l alloc8 8 + %.235 =l alloc8 8 + %.237 =l alloc4 4 + %.240 =l alloc4 4 + %.241 =l alloc4 4 + %.254 =l alloc8 8 + %.272 =l alloc4 1 + %.275 =l alloc8 56 + %.323 =l alloc8 8 + %.325 =l alloc8 8 + %.327 =l alloc8 8 + %.329 =l alloc8 8 + %.331 =l alloc4 4 + %.334 =l alloc4 4 + %.444 =l alloc4 2 + %.447 =l alloc4 24 + %.448 =l alloc8 8 + %.452 =l alloc4 4 + %.455 =l alloc4 4 + %.458 =l alloc4 4 + %.461 =l alloc4 4 + %.462 =l alloc8 8 + %.464 =l alloc4 4 + %.465 =l alloc4 4 + %.500 =l alloc4 1 + %.503 =l alloc8 8 + %.505 =l alloc8 8 + %.509 =l alloc4 1 + %.512 =l alloc8 144 + %.531 =l alloc8 8 + %.533 =l alloc4 980 + %.1106 =l alloc4 4 + %.1109 =l alloc8 8 + %.1113 =l alloc8 8 + %.1115 =l alloc8 8 + %.1117 =l alloc4 4 + %.1120 =l alloc4 1 + %.1123 =l alloc4 4 + %.1124 =l alloc4 4 + %.1125 =l alloc4 4 + %.1138 =l alloc4 36 + %.1157 =l alloc4 4 + %.1193 =l alloc8 8 + %.1198 =l alloc4 24 + %.1211 =l alloc8 8 + %.1213 =l alloc8 8 + %.1215 =l alloc4 4 + %.1220 =l alloc4 20 + %.1261 =l alloc8 8 + %.1263 =l alloc4 4 + %.1266 =l alloc4 4 + %.1269 =l alloc4 4 + %.1272 =l alloc4 4 + %.1275 =l alloc4 12 + %.1276 =l alloc8 8 + %.1278 =l alloc8 8 + %.1284 =l alloc4 1 + %.1287 =l alloc8 8 + %.1289 =l alloc8 8 + %.1291 =l alloc8 56 + %.1316 =l alloc4 4 + %.1317 =l alloc4 4 + %.1318 =l alloc4 4 + %.1350 =l alloc8 8 + %.1378 =l alloc4 4 + %.1405 =l alloc8 8 + %.1407 =l alloc8 1008 + %.1564 =l alloc8 8 + %.1568 =l alloc8 8 + %.1570 =l alloc8 8 + %.1572 =l alloc8 8 + %.1574 =l alloc8 8 + %.1576 =l alloc4 4 + %.1577 =l alloc4 4 + %.1578 =l alloc4 4 + %.1668 =l alloc4 4 + %.1673 =l alloc8 8 + %.1675 =l alloc8 8 + %.1677 =l alloc8 8 + %.1679 =l alloc8 8 + %.1681 =l alloc8 72 + %.1700 =l alloc8 8 + %.1711 =l alloc8 8 + %.1713 =l alloc8 8 + %.1715 =l alloc8 8 + %.1717 =l alloc8 8 + %.1719 =l alloc4 4 + %.1811 =l alloc8 8 + %.1815 =l alloc8 8 + %.1819 =l 
alloc8 784 + %.2212 =l alloc4 2 + %.2217 =l alloc4 4 + %.2218 =l alloc4 4 + %.2219 =l alloc4 4 + %.2309 =l alloc8 80 + %.2328 =l alloc4 4 + %.2333 =l alloc4 4 + %.2336 =l alloc4 4 + %.2541 =l alloc8 8 + %.2547 =l alloc8 8 + %.2549 =l alloc4 4 + %.2552 =l alloc8 8 + %.2556 =l alloc8 8 + %.2559 =l alloc8 8 + %.2565 =l alloc8 8 + %.2567 =l alloc8 8 + %.2569 =l alloc4 4 + %.2572 =l alloc8 384 + %.2573 =l alloc8 8 + %.2577 =l alloc8 8 + %.2583 =l alloc4 4 + %.2584 =l alloc4 4 + %.2921 =l alloc4 1 + %.3028 =l alloc8 8 + %.3033 =l alloc8 8 + %.3037 =l alloc8 8 + %.3043 =l alloc8 8 + %.3047 =l alloc8 8 + %.3053 =l alloc8 8 + %.3059 =l alloc8 8 + %.3065 =l alloc8 8 + %.3076 =l alloc8 8 + %.3082 =l alloc8 8 + %.3088 =l alloc8 1008 + %.3653 =l alloc8 8 + %.3655 =l alloc4 4 + %.3658 =l alloc4 1 + %.3661 =l alloc4 4 + %.3662 =l alloc4 4 + %.3663 =l alloc4 4 + %.3692 =l alloc4 1 + %.3695 =l alloc8 8 + %.3697 =l alloc4 4 + %.3702 =l alloc4 4 + %.3705 =l alloc4 36 + %.3706 =l alloc4 4 + %.3770 =l alloc4 2 + %.3820 =l alloc4 4 + %.3823 =l alloc4 4 + %.3826 =l alloc4 4 + %.3829 =l alloc4 4 + %.3832 =l alloc4 4 + %.3835 =l alloc8 8 + %.3841 =l alloc8 8 + %.3847 =l alloc8 8 + %.3849 =l alloc8 8 + %.3855 =l alloc8 576 + %.4204 =l alloc4 4 + %.4207 =l alloc4 4 + %.4210 =l alloc8 8 + %.4212 =l alloc4 4 + %.4213 =l alloc4 4 + %.4214 =l alloc4 4 + %.4219 =l alloc8 40 + %.4220 =l alloc4 4 + %.4378 =l alloc8 8 + %.4384 =l alloc8 8 + %.4386 =l alloc8 8 + %.4387 =l alloc4 4 + %.4390 =l alloc4 4 + %.4393 =l alloc4 2 + %.4396 =l alloc4 4 + %.4397 =l alloc4 4 +@body.1024 + %.6 =l add %.5, 0 + %.7 =w copy 0 + storeb %.7, %.6 + %.9 =l add %.8, 0 + storel $g_634, %.9 + %.11 =l add %.10, 0 + %.12 =w copy 117 + storeb %.12, %.11 + %.13 =l add %.10, 1 + storeb 0, %.13 + %.14 =l add %.10, 2 + storeh 0, %.14 + %.15 =l add %.10, 4 + storew 0, %.15 + %.16 =l add %.10, 8 + %.17 =l copy 12657291016094885149 + storel %.17, %.16 + %.18 =l add %.10, 16 + %.19 =l extsw 0 + %.20 =l sub %.19, 3 + %.21 =w copy %.20 
+ storew %.21, %.18 + %.22 =l add %.10, 20 + storew 0, %.22 + %.23 =l add %.10, 24 + storel 16915919946376103100, %.23 + %.24 =l add %.10, 32 + %.25 =w copy 2 + storew %.25, %.24 + %.26 =l add %.10, 36 + %.27 =w copy 1649859335 + storew %.27, %.26 + %.28 =l add %.10, 40 + %.29 =w copy 1843708338 + storew %.29, %.28 + %.30 =l add %.10, 44 + %.31 =w copy 0 + storew %.31, %.30 + %.32 =l add %.10, 48 + %.33 =w copy 474072632 + storew %.33, %.32 + %.34 =l add %.10, 52 + storew 0, %.34 + %.36 =l add %.35, 0 + storel $g_201, %.36 + %.38 =l add %.37, 0 + storel $g_619, %.38 + %.40 =l add %.39, 0 + storel %.37, %.40 + %.41 =l add %.39, 8 + storel %.37, %.41 + %.42 =l add %.39, 16 + storel %.37, %.42 + %.43 =l add %.39, 24 + storel %.37, %.43 + %.44 =l add %.39, 32 + storel %.37, %.44 + %.45 =l add %.39, 40 + storel %.37, %.45 + %.46 =l add %.39, 48 + storel %.37, %.46 + %.47 =l add %.39, 56 + storel %.37, %.47 + %.48 =l add %.39, 64 + storel %.37, %.48 + %.49 =l add %.39, 72 + storel %.37, %.49 + %.50 =l add %.39, 80 + storel %.37, %.50 + %.51 =l add %.39, 88 + storel %.37, %.51 + %.52 =l add %.39, 96 + storel %.37, %.52 + %.53 =l add %.39, 104 + %.54 =l extsw 0 + %.55 =l copy %.54 + storel %.55, %.53 + %.56 =l add %.39, 112 + storel %.37, %.56 + %.57 =l add %.39, 120 + storel %.37, %.57 + %.58 =l add %.39, 128 + storel %.37, %.58 + %.59 =l add %.39, 136 + storel %.37, %.59 + %.60 =l add %.39, 144 + storel %.37, %.60 + %.61 =l add %.39, 152 + storel %.37, %.61 + %.62 =l add %.39, 160 + storel %.37, %.62 + %.63 =l add %.39, 168 + storel %.37, %.63 + %.64 =l add %.39, 176 + storel %.37, %.64 + %.65 =l add %.39, 184 + storel %.37, %.65 + %.66 =l add %.39, 192 + storel %.37, %.66 + %.67 =l add %.39, 200 + storel %.37, %.67 + %.68 =l add %.39, 208 + storel %.37, %.68 + %.69 =l add %.39, 216 + storel %.37, %.69 + %.70 =l add %.39, 224 + storel %.37, %.70 + %.71 =l add %.39, 232 + storel %.37, %.71 + %.72 =l add %.39, 240 + storel %.37, %.72 + %.73 =l add %.39, 248 + storel %.37, 
%.73 + %.74 =l add %.39, 256 + storel %.37, %.74 + %.75 =l add %.39, 264 + storel %.37, %.75 + %.76 =l add %.39, 272 + storel %.37, %.76 + %.77 =l add %.39, 280 + storel %.37, %.77 + %.78 =l add %.39, 288 + storel %.37, %.78 + %.79 =l add %.39, 296 + storel %.37, %.79 + %.80 =l add %.39, 304 + storel %.37, %.80 + %.81 =l add %.39, 312 + storel %.37, %.81 + %.82 =l add %.39, 320 + storel %.37, %.82 + %.83 =l add %.39, 328 + storel %.37, %.83 + %.85 =l add %.84, 0 + %.86 =w copy 2656057619 + storew %.86, %.85 + %.88 =l add %.87, 0 + storel %.37, %.88 + %.90 =l add %.89, 0 + %.91 =w copy 4005622477 + storew %.91, %.90 + %.92 =l add %.89, 4 + %.93 =w copy 18446744073709551615 + storew %.93, %.92 + %.94 =l add %.89, 8 + %.95 =w copy 52208 + storeh %.95, %.94 + %.96 =l add %.89, 10 + storeh 0, %.96 + %.97 =l add %.89, 12 + %.98 =w copy 1484313967 + storew %.98, %.97 + %.99 =l add %.89, 16 + %.100 =w copy 354572175 + storew %.100, %.99 + %.102 =l add %.101, 0 + %.103 =w copy 1081308049 + storew %.103, %.102 + %.105 =l add %.104, 0 + %.106 =l extsw 0 + %.107 =l sub %.106, 1 + %.108 =w copy %.107 + storew %.108, %.105 + %.111 =l add %.110, 0 + %.112 =w copy 27520 + storeh %.112, %.111 + %.114 =l add %.113, 0 + %.115 =l extsw 0 + %.116 =l sub %.115, 6 + %.117 =w copy %.116 + storew %.117, %.114 + %.119 =l add %.118, 0 + %.120 =l extsw 0 + %.121 =l sub %.120, 8 + %.122 =w copy %.121 + storew %.122, %.119 + %.124 =l add %.123, 0 + %.125 =w copy 0 + storew %.125, %.124 + %.127 =l add %.126, 0 + %.128 =w copy 3763325653 + storew %.128, %.127 + %.130 =l add %.129, 0 + %.131 =w copy 2542601390 + storew %.131, %.130 + %.134 =l add %.133, 0 + %.135 =l extsw 0 + %.136 =l sub %.135, 1 + %.137 =w copy %.136 + storew %.137, %.134 + %.138 =l add %.133, 4 + %.139 =w copy 1 + storew %.139, %.138 + %.140 =l add %.133, 8 + %.141 =w copy 0 + storeh %.141, %.140 + %.142 =l add %.133, 10 + storeh 0, %.142 + %.143 =l add %.133, 12 + %.144 =w copy 1 + storew %.144, %.143 + %.145 =l add %.133, 16 
+ %.146 =w copy 1878225502 + storew %.146, %.145 + %.148 =l add %.147, 0 + %.149 =l extsw 0 + %.150 =l copy %.149 + storel %.150, %.148 + %.152 =l add %.151, 0 + %.153 =l extsw 0 + %.154 =l copy %.153 + storel %.154, %.152 + %.156 =l add %.155, 0 + %.157 =l copy %.89 + %.158 =l mul 8, 1 + %.159 =l add %.157, %.158 + %.160 =l copy %.159 + storel %.160, %.156 + %.162 =l add %.161, 0 + %.163 =w copy 44324 + storeh %.163, %.162 + %.166 =l add %.165, 0 + %.167 =l extsw 0 + %.168 =l sub %.167, 2 + %.169 =w copy %.168 + storew %.169, %.166 + %.171 =l add %.170, 0 + %.172 =w copy 18816 + storeh %.172, %.171 + storew 0, %.173 +@for_cond.1025 + %.176 =w loadsw %.173 + %.177 =w csltw %.176, 1 + jnz %.177, @for_body.1026, @for_join.1028 +@for_body.1026 + %.178 =l copy 18446744073709551606 + %.179 =w loadsw %.173 + %.180 =l extsw %.179 + %.181 =l mul %.180, 8 + %.182 =l add %.109, %.181 + storel %.178, %.182 +@for_cont.1027 + %.183 =w loadsw %.173 + %.184 =w add %.183, 1 + storew %.184, %.173 + jmp @for_cond.1025 +@for_join.1028 + storew 0, %.173 +@for_cond.1029 + %.185 =w loadsw %.173 + %.186 =w csltw %.185, 6 + jnz %.186, @for_body.1030, @for_join.1032 +@for_body.1030 + %.187 =w copy 3620798230 + %.188 =w loadsw %.173 + %.189 =l extsw %.188 + %.190 =l mul %.189, 4 + %.191 =l add %.132, %.190 + storew %.187, %.191 +@for_cont.1031 + %.192 =w loadsw %.173 + %.193 =w add %.192, 1 + storew %.193, %.173 + jmp @for_cond.1029 +@for_join.1032 + storew 0, %.173 +@for_cond.1033 + %.194 =w loadsw %.173 + %.195 =w csltw %.194, 3 + jnz %.195, @for_body.1034, @for_join.1036 +@for_body.1034 + %.196 =w copy 1 + %.197 =w loadsw %.173 + %.198 =l extsw %.197 + %.199 =l mul %.198, 4 + %.200 =l add %.164, %.199 + storew %.196, %.200 +@for_cont.1035 + %.201 =w loadsw %.173 + %.202 =w add %.201, 1 + storew %.202, %.173 + jmp @for_cond.1033 +@for_join.1036 + %.203 =w loadsb %.5 + %.204 =w extsb %.203 + %.205 =w cnew %.204, 0 + jnz %.205, @if_true.1037, @if_false.1038 +@if_true.1037 + %.207 =l add 
%.206, 0 + %.208 =w copy 58237 + storeh %.208, %.207 + %.210 =l add %.209, 0 + storel $g_634, %.210 + %.211 =l add %.209, 8 + storel $g_634, %.211 + %.212 =l add %.209, 16 + storel $g_634, %.212 + %.213 =l add %.209, 24 + storel $g_634, %.213 + %.214 =l add %.209, 32 + storel $g_634, %.214 + %.215 =l add %.209, 40 + storel $g_634, %.215 + %.216 =l add %.209, 48 + storel $g_634, %.216 + %.217 =l add %.209, 56 + storel $g_634, %.217 + %.218 =l add %.209, 64 + storel $g_634, %.218 + %.220 =l add %.219, 0 + %.221 =l extsw 0 + %.222 =l sub %.221, 1 + %.223 =w copy %.222 + storew %.223, %.220 + %.225 =l add %.224, 0 + %.226 =w copy 0 + storew %.226, %.225 + %.228 =l add %.227, 0 + %.229 =w copy 1423873353 + storew %.229, %.228 + %.231 =l add %.230, 0 + %.232 =w copy 141 + storeb %.232, %.231 + %.234 =l add %.233, 0 + storel $g_81, %.234 + %.236 =l add %.235, 0 + storel %.233, %.236 + %.238 =l add %.237, 0 + %.239 =w copy 3164006327 + storew %.239, %.238 + %.242 =w copy 0 + %.243 =l copy $g_518 + %.244 =l mul 36, 1 + %.245 =l add %.243, %.244 + %.246 =l copy %.245 + storew %.242, %.246 +@for_cond.1039 + %.247 =l copy $g_518 + %.248 =l mul 36, 1 + %.249 =l add %.247, %.248 + %.250 =l copy %.249 + %.251 =w loaduw %.250 + %.252 =w copy 7 + %.253 =w culew %.251, %.252 + jnz %.253, @for_body.1040, @for_join.1042 +@for_body.1040 + %.255 =l add %.254, 0 + %.256 =l copy $g_265 + %.257 =l mul 40, 1 + %.258 =l add %.256, %.257 + %.259 =l copy %.258 + storel %.259, %.255 + %.260 =w copy 0 + %.261 =l copy $g_185 + %.262 =l mul 32, 1 + %.263 =l add %.261, %.262 + %.264 =l copy %.263 + storew %.260, %.264 +@for_cond.1043 + %.265 =l copy $g_185 + %.266 =l mul 32, 1 + %.267 =l add %.265, %.266 + %.268 =l copy %.267 + %.269 =w loaduw %.268 + %.270 =w copy 7 + %.271 =w culew %.269, %.270 + jnz %.271, @for_body.1044, @for_join.1046 +@for_body.1044 + %.273 =l add %.272, 0 + %.274 =w copy 247 + storeb %.274, %.273 + %.276 =l add %.275, 0 + %.277 =w copy 115 + storeb %.277, %.276 + %.278 =l 
add %.275, 1 + storeb 0, %.278 + %.279 =l add %.275, 2 + storeh 0, %.279 + %.280 =l add %.275, 4 + storew 0, %.280 + %.281 =l add %.275, 8 + %.282 =l copy 15860712757478651316 + storel %.282, %.281 + %.283 =l add %.275, 16 + %.284 =w copy 0 + storew %.284, %.283 + %.285 =l add %.275, 20 + storew 0, %.285 + %.286 =l add %.275, 24 + %.287 =l copy 0 + storel %.287, %.286 + %.288 =l add %.275, 32 + %.289 =w copy 4294967295 + storew %.289, %.288 + %.290 =l add %.275, 36 + %.291 =w copy 231051218 + storew %.291, %.290 + %.292 =l add %.275, 40 + %.293 =w copy 4107508781 + storew %.293, %.292 + %.294 =l add %.275, 44 + %.295 =w copy 3780069515 + storew %.295, %.294 + %.296 =l add %.275, 48 + %.297 =w copy 2575030066 + storew %.297, %.296 + %.298 =l add %.275, 52 + storew 0, %.298 + %.299 =l loadl %.4 + %.300 =w loadsw %.299 + %.301 =l loadl $g_173 + %.302 =w loadsw %.301 + %.303 =l extsw %.302 + %.304 =l and %.303, 7 + %.305 =w copy %.304 + storew %.305, %.301 + %.306 =w or %.300, %.305 + storew %.306, %.299 + %.307 =w loaduh %.206 + %.308 =w extuh %.307 + %.309 =l extsw 0 + %.310 =l sub %.309, 10 + %.311 =w cnel %.310, 0 + jnz %.311, @logic_join.1048, @logic_right.1047 +@logic_right.1047 + %.312 =w loadub %.272 + %.313 =w extub %.312 + %.314 =w cnew %.313, 0 +@logic_join.1048 + %.315 =w phi @for_body.1044 %.311, @logic_right.1047 %.314 + %.316 =w loadsb %.5 + %.317 =w extsb %.316 + %.318 =w csgew %.315, %.317 + %.319 =w cnew %.308, %.318 + %.320 =l extsw %.319 + %.321 =w cslel 63086, %.320 + %.322 =w cnew %.321, 0 + jnz %.322, @if_true.1049, @if_false.1050 +@if_true.1049 + %.324 =l add %.323, 0 + storel $g_619, %.324 + %.326 =l add %.325, 0 + storel %.323, %.326 + %.328 =l add %.327, 0 + storel $g_84, %.328 + %.330 =l add %.329, 0 + storel $g_82, %.330 + %.332 =l add %.331, 0 + %.333 =w copy 107414150 + storew %.333, %.332 + %.335 =l loadl $g_173 + %.336 =w loadsw %.335 + %.337 =w copy %.336 + %.338 =w loadsw %.2 + %.339 =w copy %.338 + %.340 =w copy 254 + %.341 =l loadl 
%.325 + storel $g_619, %.341 + %.342 =w ceql $g_619, $g_619 + %.343 =w copy %.342 + %.344 =w call $safe_add_func_uint8_t_u_u(w %.340, w %.343) + %.345 =w loadsw %.2 + %.346 =l extsw %.345 + %.347 =w ceql 7045748483853119398, %.346 + %.348 =w copy %.347 + %.349 =l loadl %.327 + storew %.348, %.349 + %.350 =w cnel 65535, 0 + jnz %.350, @logic_right.1051, @logic_join.1052 +@logic_right.1051 + %.351 =l copy $g_130 + %.352 =l mul 8, 1 + %.353 =l add %.351, %.352 + %.354 =l copy %.353 + %.355 =w loadsh %.354 + %.356 =l extsh %.355 + %.357 =w loadsw $g_24 + %.358 =l extsw %.357 + %.359 =l loadl %.329 + storel %.358, %.359 + %.360 =l and %.356, %.358 + %.361 =w loadsw %.331 + %.362 =l extsw %.361 + %.363 =w csltl %.360, %.362 + %.364 =w loaduh %.206 + %.365 =w extuh %.364 + %.366 =w cnew %.363, %.365 + %.367 =w loadsb %.5 + %.368 =w extsb %.367 + %.369 =w csgtw %.366, %.368 + %.370 =w cnew %.369, 0 +@logic_join.1052 + %.371 =w phi @if_true.1049 %.350, @logic_right.1051 %.370 + %.372 =w copy %.371 + %.373 =w cnew %.348, %.372 + %.374 =w loadsw %.331 + %.375 =l extsw %.374 + %.376 =w cslel 130, %.375 + %.377 =w loadsb %.5 + %.378 =l extsb %.377 + %.379 =l xor 1351500553408859485, %.378 + %.380 =w copy %.379 + %.381 =w call $safe_div_func_uint32_t_u_u(w %.339, w %.380) + %.382 =w and %.337, %.381 + %.383 =w copy %.382 + storew %.383, %.335 + jmp @if_join.1053 +@if_false.1050 + %.384 =l extsw 2 + storel %.384, $g_82 +@for_cond.1054 + %.385 =l loadl $g_82 + %.386 =l extsw 7 + %.387 =w cslel %.385, %.386 + jnz %.387, @for_body.1055, @for_join.1057 +@for_body.1055 + %.388 =w loadsb %.5 + %.389 =w extsb %.388 + %.390 =w cnew %.389, 0 + jnz %.390, @if_true.1058, @if_false.1059 +@if_true.1058 + jmp @for_join.1057 +@if_false.1059 + %.391 =l extsw 0 + %.392 =l copy %.391 + storel %.392, %.254 +@for_cont.1056 + %.393 =l loadl $g_82 + %.394 =l extsw 1 + %.395 =l add %.393, %.394 + storel %.395, $g_82 + jmp @for_cond.1054 +@for_join.1057 + storew 0, $g_24 +@for_cond.1060 + %.396 =w 
loadsw $g_24 + %.397 =w csltw %.396, 6 + jnz %.397, @for_body.1061, @for_join.1063 +@for_body.1061 + %.398 =w copy 1 + %.399 =w loadsw $g_24 + %.400 =l extsw %.399 + %.401 =l mul %.400, 1 + %.402 =l add $g_132, %.401 + storeb %.398, %.402 +@for_cont.1062 + %.403 =w loadsw $g_24 + %.404 =w add %.403, 1 + storew %.404, $g_24 + jmp @for_cond.1060 +@for_join.1063 + %.405 =l loadl $g_23 + %.406 =w loadsw %.405 + %.407 =l copy %.275 + %.408 =l mul 16, 1 + %.409 =l add %.407, %.408 + %.410 =l copy %.409 + %.411 =w loadsw %.410 + %.412 =w or %.406, %.411 + storew %.412, %.405 +@if_join.1053 +@for_cont.1045 + %.413 =l copy $g_185 + %.414 =l mul 32, 1 + %.415 =l add %.413, %.414 + %.416 =l copy %.415 + %.417 =w loaduw %.416 + %.418 =w copy 1 + %.419 =w add %.417, %.418 + storew %.419, %.416 + jmp @for_cond.1043 +@for_join.1046 +@for_cont.1041 + %.420 =l copy $g_518 + %.421 =l mul 36, 1 + %.422 =l add %.420, %.421 + %.423 =l copy %.422 + %.424 =w loaduw %.423 + %.425 =w copy 1 + %.426 =w add %.424, %.425 + storew %.426, %.423 + jmp @for_cond.1039 +@for_join.1042 + %.427 =l loadl $g_38 + %.428 =l loadl %.427 + %.429 =w loadsw %.428 + %.430 =l extsw %.429 + %.431 =l xor %.430, 0 + %.432 =w copy %.431 + storew %.432, %.428 + %.433 =l copy $g_518 + %.434 =l mul 40, 1 + %.435 =l add %.433, %.434 + %.436 =l copy %.435 + storew 0, %.436 +@for_cond.1064 + %.437 =l copy $g_518 + %.438 =l mul 40, 1 + %.439 =l add %.437, %.438 + %.440 =l copy %.439 + %.441 =w loadsw %.440 + %.442 =w sub 0, 24 + %.443 =w csgew %.441, %.442 + jnz %.443, @for_body.1065, @for_join.1067 +@for_body.1065 + %.445 =l add %.444, 0 + %.446 =w copy 9830 + storeh %.446, %.445 + %.449 =l add %.448, 0 + %.450 =l extsw 0 + %.451 =l copy %.450 + storel %.451, %.449 + %.453 =l add %.452, 0 + %.454 =w copy 200348871 + storew %.454, %.453 + %.456 =l add %.455, 0 + %.457 =w copy 18446744073709551615 + storew %.457, %.456 + %.459 =l add %.458, 0 + %.460 =w copy 640759230 + storew %.460, %.459 + %.463 =l add %.462, 0 + storel 
%.37, %.463 + storew 0, %.464 +@for_cond.1068 + %.466 =w loadsw %.464 + %.467 =w csltw %.466, 2 + jnz %.467, @for_body.1069, @for_join.1071 +@for_body.1069 + storew 0, %.465 +@for_cond.1072 + %.468 =w loadsw %.465 + %.469 =w csltw %.468, 3 + jnz %.469, @for_body.1073, @for_join.1075 +@for_body.1073 + %.470 =w copy 4109095570 + %.471 =w loadsw %.464 + %.472 =l extsw %.471 + %.473 =l mul %.472, 12 + %.474 =l add %.447, %.473 + %.475 =w loadsw %.465 + %.476 =l extsw %.475 + %.477 =l mul %.476, 4 + %.478 =l add %.474, %.477 + storew %.470, %.478 +@for_cont.1074 + %.479 =w loadsw %.465 + %.480 =w add %.479, 1 + storew %.480, %.465 + jmp @for_cond.1072 +@for_join.1075 +@for_cont.1070 + %.481 =w loadsw %.464 + %.482 =w add %.481, 1 + storew %.482, %.464 + jmp @for_cond.1068 +@for_join.1071 + storew 0, %.464 +@for_cond.1076 + %.483 =w loadsw %.464 + %.484 =w csltw %.483, 2 + jnz %.484, @for_body.1077, @for_join.1079 +@for_body.1077 + %.485 =w copy 14430 + %.486 =w loadsw %.464 + %.487 =l extsw %.486 + %.488 =l mul %.487, 2 + %.489 =l add %.461, %.488 + storeh %.485, %.489 +@for_cont.1078 + %.490 =w loadsw %.464 + %.491 =w add %.490, 1 + storew %.491, %.464 + jmp @for_cond.1076 +@for_join.1079 +@for_cont.1066 + %.492 =l copy $g_518 + %.493 =l mul 40, 1 + %.494 =l add %.492, %.493 + %.495 =l copy %.494 + %.496 =w loadsw %.495 + %.497 =w sub %.496, 1 + storew %.497, %.495 + jmp @for_cond.1064 +@for_join.1067 + %.498 =w loadsw %.224 + %.499 =l loadl $g_23 + storew %.498, %.499 + jmp @if_join.1080 +@if_false.1038 + %.501 =l add %.500, 0 + %.502 =w copy 1 + storeb %.502, %.501 + %.504 =l add %.503, 0 + storel $g_776, %.504 + %.506 =l add %.505, 0 + %.507 =l extsw 0 + %.508 =l copy %.507 + storel %.508, %.506 + %.510 =l add %.509, 0 + %.511 =w copy 220 + storeb %.511, %.510 + %.513 =l add %.512, 0 + storel $g_794, %.513 + %.514 =l add %.512, 8 + storel $g_794, %.514 + %.515 =l add %.512, 16 + storel %.89, %.515 + %.516 =l add %.512, 24 + storel $g_794, %.516 + %.517 =l add %.512, 
32 + storel $g_794, %.517 + %.518 =l add %.512, 40 + storel $g_794, %.518 + %.519 =l add %.512, 48 + storel %.89, %.519 + %.520 =l add %.512, 56 + storel $g_794, %.520 + %.521 =l add %.512, 64 + storel $g_794, %.521 + %.522 =l add %.512, 72 + storel $g_130, %.522 + %.523 =l add %.512, 80 + storel $g_794, %.523 + %.524 =l add %.512, 88 + storel $g_794, %.524 + %.525 =l add %.512, 96 + storel %.89, %.525 + %.526 =l add %.512, 104 + storel %.89, %.526 + %.527 =l add %.512, 112 + storel $g_794, %.527 + %.528 =l add %.512, 120 + storel $g_794, %.528 + %.529 =l add %.512, 128 + storel $g_130, %.529 + %.530 =l add %.512, 136 + storel $g_794, %.530 + %.532 =l add %.531, 0 + storel %.8, %.532 + %.534 =l add %.533, 0 + %.535 =w copy 3959554745 + storew %.535, %.534 + %.536 =l add %.533, 4 + %.537 =w copy 0 + storew %.537, %.536 + %.538 =l add %.533, 8 + %.539 =l extsw 0 + %.540 =l sub %.539, 1 + %.541 =w copy %.540 + storew %.541, %.538 + %.542 =l add %.533, 12 + %.543 =w copy 1653568614 + storew %.543, %.542 + %.544 =l add %.533, 16 + %.545 =w copy 3252988231 + storew %.545, %.544 + %.546 =l add %.533, 20 + %.547 =w copy 3 + storew %.547, %.546 + %.548 =l add %.533, 24 + %.549 =w copy 1653568614 + storew %.549, %.548 + %.550 =l add %.533, 28 + %.551 =w copy 2004438502 + storew %.551, %.550 + %.552 =l add %.533, 32 + %.553 =w copy 3959554745 + storew %.553, %.552 + %.554 =l add %.533, 36 + %.555 =w copy 4196441402 + storew %.555, %.554 + %.556 =l add %.533, 40 + %.557 =l extsw 0 + %.558 =l sub %.557, 1 + %.559 =w copy %.558 + storew %.559, %.556 + %.560 =l add %.533, 44 + %.561 =w copy 1 + storew %.561, %.560 + %.562 =l add %.533, 48 + %.563 =w copy 0 + storew %.563, %.562 + %.564 =l add %.533, 52 + %.565 =w copy 3252988231 + storew %.565, %.564 + %.566 =l add %.533, 56 + %.567 =l extsw 0 + %.568 =l sub %.567, 9 + %.569 =w copy %.568 + storew %.569, %.566 + %.570 =l add %.533, 60 + %.571 =w copy 3 + storew %.571, %.570 + %.572 =l add %.533, 64 + %.573 =w copy 2108666265 + 
storew %.573, %.572 + %.574 =l add %.533, 68 + %.575 =w copy 1 + storew %.575, %.574 + %.576 =l add %.533, 72 + %.577 =w copy 3 + storew %.577, %.576 + %.578 =l add %.533, 76 + %.579 =w copy 0 + storew %.579, %.578 + %.580 =l add %.533, 80 + %.581 =w copy 3959554745 + storew %.581, %.580 + %.582 =l add %.533, 84 + %.583 =w copy 836215103 + storew %.583, %.582 + %.584 =l add %.533, 88 + %.585 =w copy 2004438502 + storew %.585, %.584 + %.586 =l add %.533, 92 + %.587 =w copy 1 + storew %.587, %.586 + %.588 =l add %.533, 96 + %.589 =w copy 1 + storew %.589, %.588 + %.590 =l add %.533, 100 + %.591 =w copy 2853350422 + storew %.591, %.590 + %.592 =l add %.533, 104 + %.593 =w copy 836215103 + storew %.593, %.592 + %.594 =l add %.533, 108 + %.595 =w copy 2853350422 + storew %.595, %.594 + %.596 =l add %.533, 112 + %.597 =w copy 0 + storew %.597, %.596 + %.598 =l add %.533, 116 + %.599 =w copy 3 + storew %.599, %.598 + %.600 =l add %.533, 120 + %.601 =w copy 1 + storew %.601, %.600 + %.602 =l add %.533, 124 + %.603 =w copy 2108666265 + storew %.603, %.602 + %.604 =l add %.533, 128 + %.605 =w copy 0 + storew %.605, %.604 + %.606 =l add %.533, 132 + %.607 =l extsw 0 + %.608 =l sub %.607, 9 + %.609 =w copy %.608 + storew %.609, %.606 + %.610 =l add %.533, 136 + %.611 =w copy 3252988231 + storew %.611, %.610 + %.612 =l add %.533, 140 + %.613 =l extsw 0 + %.614 =l sub %.613, 9 + %.615 =w copy %.614 + storew %.615, %.612 + %.616 =l add %.533, 144 + %.617 =w copy 1 + storew %.617, %.616 + %.618 =l add %.533, 148 + %.619 =w copy 263794776 + storew %.619, %.618 + %.620 =l add %.533, 152 + %.621 =w copy 4196441402 + storew %.621, %.620 + %.622 =l add %.533, 156 + %.623 =w copy 3959554745 + storew %.623, %.622 + %.624 =l add %.533, 160 + %.625 =w copy 2853350422 + storew %.625, %.624 + %.626 =l add %.533, 164 + %.627 =w copy 1653568614 + storew %.627, %.626 + %.628 =l add %.533, 168 + %.629 =w copy 0 + storew %.629, %.628 + %.630 =l add %.533, 172 + %.631 =w copy 3252988231 + storew 
%.631, %.630 + %.632 =l add %.533, 176 + %.633 =w copy 1653568614 + storew %.633, %.632 + %.634 =l add %.533, 180 + %.635 =w copy 1653568614 + storew %.635, %.634 + %.636 =l add %.533, 184 + %.637 =w copy 0 + storew %.637, %.636 + %.638 =l add %.533, 188 + %.639 =w copy 2853350422 + storew %.639, %.638 + %.640 =l add %.533, 192 + %.641 =w copy 0 + storew %.641, %.640 + %.642 =l add %.533, 196 + %.643 =w copy 3252988231 + storew %.643, %.642 + %.644 =l add %.533, 200 + %.645 =w copy 836215103 + storew %.645, %.644 + %.646 =l add %.533, 204 + %.647 =w copy 0 + storew %.647, %.646 + %.648 =l add %.533, 208 + %.649 =w copy 2004438502 + storew %.649, %.648 + %.650 =l add %.533, 212 + %.651 =w copy 0 + storew %.651, %.650 + %.652 =l add %.533, 216 + %.653 =w copy 836215103 + storew %.653, %.652 + %.654 =l add %.533, 220 + %.655 =l extsw 0 + %.656 =l sub %.655, 1 + %.657 =w copy %.656 + storew %.657, %.654 + %.658 =l add %.533, 224 + %.659 =l extsw 0 + %.660 =l sub %.659, 9 + %.661 =w copy %.660 + storew %.661, %.658 + %.662 =l add %.533, 228 + %.663 =w copy 1 + storew %.663, %.662 + %.664 =l add %.533, 232 + %.665 =w copy 3252988231 + storew %.665, %.664 + %.666 =l add %.533, 236 + %.667 =w copy 2208162857 + storew %.667, %.666 + %.668 =l add %.533, 240 + %.669 =w copy 4196441402 + storew %.669, %.668 + %.670 =l add %.533, 244 + %.671 =w copy 2108666265 + storew %.671, %.670 + %.672 =l add %.533, 248 + %.673 =w copy 0 + storew %.673, %.672 + %.674 =l add %.533, 252 + %.675 =w copy 4196441402 + storew %.675, %.674 + %.676 =l add %.533, 256 + %.677 =w copy 0 + storew %.677, %.676 + %.678 =l add %.533, 260 + %.679 =w copy 0 + storew %.679, %.678 + %.680 =l add %.533, 264 + %.681 =w copy 3 + storew %.681, %.680 + %.682 =l add %.533, 268 + %.683 =w copy 2004438502 + storew %.683, %.682 + %.684 =l add %.533, 272 + %.685 =l extsw 0 + %.686 =l sub %.685, 9 + %.687 =w copy %.686 + storew %.687, %.684 + %.688 =l add %.533, 276 + %.689 =w copy 2208162857 + storew %.689, %.688 + 
%.690 =l add %.533, 280 + %.691 =w copy 2853350422 + storew %.691, %.690 + %.692 =l add %.533, 284 + %.693 =w copy 4196441402 + storew %.693, %.692 + %.694 =l add %.533, 288 + %.695 =l extsw 0 + %.696 =l sub %.695, 1 + %.697 =w copy %.696 + storew %.697, %.694 + %.698 =l add %.533, 292 + %.699 =w copy 0 + storew %.699, %.698 + %.700 =l add %.533, 296 + %.701 =w copy 836215103 + storew %.701, %.700 + %.702 =l add %.533, 300 + %.703 =w copy 2208162857 + storew %.703, %.702 + %.704 =l add %.533, 304 + %.705 =w copy 2108666265 + storew %.705, %.704 + %.706 =l add %.533, 308 + %.707 =w copy 3252988231 + storew %.707, %.706 + %.708 =l add %.533, 312 + %.709 =w copy 1 + storew %.709, %.708 + %.710 =l add %.533, 316 + %.711 =w copy 3252988231 + storew %.711, %.710 + %.712 =l add %.533, 320 + %.713 =w copy 2208162857 + storew %.713, %.712 + %.714 =l add %.533, 324 + %.715 =w copy 2208162857 + storew %.715, %.714 + %.716 =l add %.533, 328 + %.717 =w copy 263794776 + storew %.717, %.716 + %.718 =l add %.533, 332 + %.719 =w copy 3 + storew %.719, %.718 + %.720 =l add %.533, 336 + %.721 =w copy 1653568614 + storew %.721, %.720 + %.722 =l add %.533, 340 + %.723 =w copy 2853350422 + storew %.723, %.722 + %.724 =l add %.533, 344 + %.725 =w copy 3959554745 + storew %.725, %.724 + %.726 =l add %.533, 348 + %.727 =w copy 1 + storew %.727, %.726 + %.728 =l add %.533, 352 + %.729 =w copy 3252988231 + storew %.729, %.728 + %.730 =l add %.533, 356 + %.731 =w copy 3959554745 + storew %.731, %.730 + %.732 =l add %.533, 360 + %.733 =w copy 0 + storew %.733, %.732 + %.734 =l add %.533, 364 + %.735 =w copy 0 + storew %.735, %.734 + %.736 =l add %.533, 368 + %.737 =w copy 5 + storew %.737, %.736 + %.738 =l add %.533, 372 + %.739 =w copy 3959554745 + storew %.739, %.738 + %.740 =l add %.533, 376 + %.741 =w copy 3252988231 + storew %.741, %.740 + %.742 =l add %.533, 380 + %.743 =w copy 0 + storew %.743, %.742 + %.744 =l add %.533, 384 + %.745 =w copy 3252988231 + storew %.745, %.744 + %.746 =l 
add %.533, 388 + %.747 =l extsw 0 + %.748 =l sub %.747, 9 + %.749 =w copy %.748 + storew %.749, %.746 + %.750 =l add %.533, 392 + %.751 =w copy 0 + storew %.751, %.750 + %.752 =l add %.533, 396 + %.753 =w copy 2108666265 + storew %.753, %.752 + %.754 =l add %.533, 400 + %.755 =l extsw 0 + %.756 =l sub %.755, 1 + %.757 =w copy %.756 + storew %.757, %.754 + %.758 =l add %.533, 404 + %.759 =w copy 5 + storew %.759, %.758 + %.760 =l add %.533, 408 + %.761 =w copy 1101784401 + storew %.761, %.760 + %.762 =l add %.533, 412 + %.763 =w copy 2523405358 + storew %.763, %.762 + %.764 =l add %.533, 416 + %.765 =l extsw 0 + %.766 =l sub %.765, 1 + %.767 =w copy %.766 + storew %.767, %.764 + %.768 =l add %.533, 420 + %.769 =w copy 1 + storew %.769, %.768 + %.770 =l add %.533, 424 + %.771 =w copy 1101784401 + storew %.771, %.770 + %.772 =l add %.533, 428 + %.773 =w copy 1101784401 + storew %.773, %.772 + %.774 =l add %.533, 432 + %.775 =w copy 1 + storew %.775, %.774 + %.776 =l add %.533, 436 + %.777 =w copy 2657468036 + storew %.777, %.776 + %.778 =l add %.533, 440 + %.779 =w copy 0 + storew %.779, %.778 + %.780 =l add %.533, 444 + %.781 =w copy 0 + storew %.781, %.780 + %.782 =l add %.533, 448 + %.783 =l extsw 0 + %.784 =l sub %.783, 9 + %.785 =w copy %.784 + storew %.785, %.782 + %.786 =l add %.533, 452 + %.787 =w copy 5 + storew %.787, %.786 + %.788 =l add %.533, 456 + %.789 =w copy 2853350422 + storew %.789, %.788 + %.790 =l add %.533, 460 + %.791 =l extsw 0 + %.792 =l sub %.791, 4 + %.793 =w copy %.792 + storew %.793, %.790 + %.794 =l add %.533, 464 + %.795 =w copy 2108666265 + storew %.795, %.794 + %.796 =l add %.533, 468 + %.797 =l extsw 0 + %.798 =l sub %.797, 1 + %.799 =w copy %.798 + storew %.799, %.796 + %.800 =l add %.533, 472 + %.801 =w copy 2657468036 + storew %.801, %.800 + %.802 =l add %.533, 476 + %.803 =w copy 0 + storew %.803, %.802 + %.804 =l add %.533, 480 + %.805 =w copy 2853350422 + storew %.805, %.804 + %.806 =l add %.533, 484 + %.807 =w copy 1101784401 + 
storew %.807, %.806 + %.808 =l add %.533, 488 + %.809 =w copy 51963591 + storew %.809, %.808 + %.810 =l add %.533, 492 + %.811 =w copy 5 + storew %.811, %.810 + %.812 =l add %.533, 496 + %.813 =w copy 2523405358 + storew %.813, %.812 + %.814 =l add %.533, 500 + %.815 =l extsw 0 + %.816 =l sub %.815, 1 + %.817 =w copy %.816 + storew %.817, %.814 + %.818 =l add %.533, 504 + %.819 =w copy 2853350422 + storew %.819, %.818 + %.820 =l add %.533, 508 + %.821 =w copy 1 + storew %.821, %.820 + %.822 =l add %.533, 512 + %.823 =w copy 1 + storew %.823, %.822 + %.824 =l add %.533, 516 + %.825 =w copy 1 + storew %.825, %.824 + %.826 =l add %.533, 520 + %.827 =l extsw 0 + %.828 =l sub %.827, 4 + %.829 =w copy %.828 + storew %.829, %.826 + %.830 =l add %.533, 524 + %.831 =w copy 1 + storew %.831, %.830 + %.832 =l add %.533, 528 + %.833 =w copy 2523405358 + storew %.833, %.832 + %.834 =l add %.533, 532 + %.835 =w copy 2523405358 + storew %.835, %.834 + %.836 =l add %.533, 536 + %.837 =w copy 1 + storew %.837, %.836 + %.838 =l add %.533, 540 + %.839 =w copy 3252988231 + storew %.839, %.838 + %.840 =l add %.533, 544 + %.841 =w copy 2004438502 + storew %.841, %.840 + %.842 =l add %.533, 548 + %.843 =l extsw 0 + %.844 =l sub %.843, 1 + %.845 =w copy %.844 + storew %.845, %.842 + %.846 =l add %.533, 552 + %.847 =w copy 0 + storew %.847, %.846 + %.848 =l add %.533, 556 + %.849 =w copy 2523405358 + storew %.849, %.848 + %.850 =l add %.533, 560 + %.851 =w copy 2108666265 + storew %.851, %.850 + %.852 =l add %.533, 564 + %.853 =w copy 0 + storew %.853, %.852 + %.854 =l add %.533, 568 + %.855 =l extsw 0 + %.856 =l sub %.855, 9 + %.857 =w copy %.856 + storew %.857, %.854 + %.858 =l add %.533, 572 + %.859 =w copy 3252988231 + storew %.859, %.858 + %.860 =l add %.533, 576 + %.861 =w copy 0 + storew %.861, %.860 + %.862 =l add %.533, 580 + %.863 =l extsw 0 + %.864 =l sub %.863, 1 + %.865 =w copy %.864 + storew %.865, %.862 + %.866 =l add %.533, 584 + %.867 =l extsw 0 + %.868 =l sub %.867, 1 + 
%.869 =w copy %.868 + storew %.869, %.866 + %.870 =l add %.533, 588 + %.871 =w copy 2004438502 + storew %.871, %.870 + %.872 =l add %.533, 592 + %.873 =w copy 2523405358 + storew %.873, %.872 + %.874 =l add %.533, 596 + %.875 =w copy 2853350422 + storew %.875, %.874 + %.876 =l add %.533, 600 + %.877 =w copy 2108666265 + storew %.877, %.876 + %.878 =l add %.533, 604 + %.879 =w copy 1101784401 + storew %.879, %.878 + %.880 =l add %.533, 608 + %.881 =w copy 263794776 + storew %.881, %.880 + %.882 =l add %.533, 612 + %.883 =w copy 2108666265 + storew %.883, %.882 + %.884 =l add %.533, 616 + %.885 =w copy 2657468036 + storew %.885, %.884 + %.886 =l add %.533, 620 + %.887 =w copy 3252988231 + storew %.887, %.886 + %.888 =l add %.533, 624 + %.889 =w copy 1 + storew %.889, %.888 + %.890 =l add %.533, 628 + %.891 =l extsw 0 + %.892 =l sub %.891, 9 + %.893 =w copy %.892 + storew %.893, %.890 + %.894 =l add %.533, 632 + %.895 =l extsw 0 + %.896 =l sub %.895, 1 + %.897 =w copy %.896 + storew %.897, %.894 + %.898 =l add %.533, 636 + %.899 =l extsw 0 + %.900 =l sub %.899, 1 + %.901 =w copy %.900 + storew %.901, %.898 + %.902 =l add %.533, 640 + %.903 =l extsw 0 + %.904 =l sub %.903, 4 + %.905 =w copy %.904 + storew %.905, %.902 + %.906 =l add %.533, 644 + %.907 =w copy 1 + storew %.907, %.906 + %.908 =l add %.533, 648 + %.909 =l extsw 0 + %.910 =l sub %.909, 4 + %.911 =w copy %.910 + storew %.911, %.908 + %.912 =l add %.533, 652 + %.913 =w copy 2657468036 + storew %.913, %.912 + %.914 =l add %.533, 656 + %.915 =w copy 2108666265 + storew %.915, %.914 + %.916 =l add %.533, 660 + %.917 =l extsw 0 + %.918 =l sub %.917, 1 + %.919 =w copy %.918 + storew %.919, %.916 + %.920 =l add %.533, 664 + %.921 =w copy 1101784401 + storew %.921, %.920 + %.922 =l add %.533, 668 + %.923 =w copy 2657468036 + storew %.923, %.922 + %.924 =l add %.533, 672 + %.925 =w copy 2853350422 + storew %.925, %.924 + %.926 =l add %.533, 676 + %.927 =w copy 2523405358 + storew %.927, %.926 + %.928 =l add %.533, 
680 + %.929 =w copy 2853350422 + storew %.929, %.928 + %.930 =l add %.533, 684 + %.931 =l extsw 0 + %.932 =l sub %.931, 1 + %.933 =w copy %.932 + storew %.933, %.930 + %.934 =l add %.533, 688 + %.935 =w copy 1 + storew %.935, %.934 + %.936 =l add %.533, 692 + %.937 =w copy 0 + storew %.937, %.936 + %.938 =l add %.533, 696 + %.939 =w copy 3252988231 + storew %.939, %.938 + %.940 =l add %.533, 700 + %.941 =l extsw 0 + %.942 =l sub %.941, 4 + %.943 =w copy %.942 + storew %.943, %.940 + %.944 =l add %.533, 704 + %.945 =w copy 0 + storew %.945, %.944 + %.946 =l add %.533, 708 + %.947 =w copy 2657468036 + storew %.947, %.946 + %.948 =l add %.533, 712 + %.949 =w copy 2523405358 + storew %.949, %.948 + %.950 =l add %.533, 716 + %.951 =w copy 0 + storew %.951, %.950 + %.952 =l add %.533, 720 + %.953 =w copy 0 + storew %.953, %.952 + %.954 =l add %.533, 724 + %.955 =w copy 2004438502 + storew %.955, %.954 + %.956 =l add %.533, 728 + %.957 =l extsw 0 + %.958 =l sub %.957, 4 + %.959 =w copy %.958 + storew %.959, %.956 + %.960 =l add %.533, 732 + %.961 =w copy 1 + storew %.961, %.960 + %.962 =l add %.533, 736 + %.963 =w copy 2523405358 + storew %.963, %.962 + %.964 =l add %.533, 740 + %.965 =w copy 1 + storew %.965, %.964 + %.966 =l add %.533, 744 + %.967 =w copy 1 + storew %.967, %.966 + %.968 =l add %.533, 748 + %.969 =l extsw 0 + %.970 =l sub %.969, 9 + %.971 =w copy %.970 + storew %.971, %.968 + %.972 =l add %.533, 752 + %.973 =w copy 1 + storew %.973, %.972 + %.974 =l add %.533, 756 + %.975 =w copy 1 + storew %.975, %.974 + %.976 =l add %.533, 760 + %.977 =l extsw 0 + %.978 =l sub %.977, 1 + %.979 =w copy %.978 + storew %.979, %.976 + %.980 =l add %.533, 764 + %.981 =w copy 2853350422 + storew %.981, %.980 + %.982 =l add %.533, 768 + %.983 =w copy 263794776 + storew %.983, %.982 + %.984 =l add %.533, 772 + %.985 =w copy 2523405358 + storew %.985, %.984 + %.986 =l add %.533, 776 + %.987 =w copy 5 + storew %.987, %.986 + %.988 =l add %.533, 780 + %.989 =w copy 0 + storew 
%.989, %.988 + %.990 =l add %.533, 784 + %.991 =w copy 1101784401 + storew %.991, %.990 + %.992 =l add %.533, 788 + %.993 =w copy 2004438502 + storew %.993, %.992 + %.994 =l add %.533, 792 + %.995 =w copy 0 + storew %.995, %.994 + %.996 =l add %.533, 796 + %.997 =w copy 2657468036 + storew %.997, %.996 + %.998 =l add %.533, 800 + %.999 =w copy 1 + storew %.999, %.998 + %.1000 =l add %.533, 804 + %.1001 =w copy 2108666265 + storew %.1001, %.1000 + %.1002 =l add %.533, 808 + %.1003 =l extsw 0 + %.1004 =l sub %.1003, 9 + %.1005 =w copy %.1004 + storew %.1005, %.1002 + %.1006 =l add %.533, 812 + %.1007 =w copy 2853350422 + storew %.1007, %.1006 + %.1008 =l add %.533, 816 + %.1009 =w copy 5 + storew %.1009, %.1008 + %.1010 =l add %.533, 820 + %.1011 =l extsw 0 + %.1012 =l sub %.1011, 4 + %.1013 =w copy %.1012 + storew %.1013, %.1010 + %.1014 =l add %.533, 824 + %.1015 =w copy 0 + storew %.1015, %.1014 + %.1016 =l add %.533, 828 + %.1017 =l extsw 0 + %.1018 =l sub %.1017, 1 + %.1019 =w copy %.1018 + storew %.1019, %.1016 + %.1020 =l add %.533, 832 + %.1021 =w copy 2657468036 + storew %.1021, %.1020 + %.1022 =l add %.533, 836 + %.1023 =w copy 1 + storew %.1023, %.1022 + %.1024 =l add %.533, 840 + %.1025 =w copy 5 + storew %.1025, %.1024 + %.1026 =l add %.533, 844 + %.1027 =w copy 1101784401 + storew %.1027, %.1026 + %.1028 =l add %.533, 848 + %.1029 =w copy 2523405358 + storew %.1029, %.1028 + %.1030 =l add %.533, 852 + %.1031 =l extsw 0 + %.1032 =l sub %.1031, 1 + %.1033 =w copy %.1032 + storew %.1033, %.1030 + %.1034 =l add %.533, 856 + %.1035 =w copy 2523405358 + storew %.1035, %.1034 + %.1036 =l add %.533, 860 + %.1037 =w copy 5 + storew %.1037, %.1036 + %.1038 =l add %.533, 864 + %.1039 =w copy 5 + storew %.1039, %.1038 + %.1040 =l add %.533, 868 + %.1041 =w copy 1 + storew %.1041, %.1040 + %.1042 =l add %.533, 872 + %.1043 =w copy 2108666265 + storew %.1043, %.1042 + %.1044 =l add %.533, 876 + %.1045 =w copy 0 + storew %.1045, %.1044 + %.1046 =l add %.533, 880 + 
%.1047 =l extsw 0 + %.1048 =l sub %.1047, 4 + %.1049 =w copy %.1048 + storew %.1049, %.1046 + %.1050 =l add %.533, 884 + %.1051 =w copy 3252988231 + storew %.1051, %.1050 + %.1052 =l add %.533, 888 + %.1053 =w copy 51963591 + storew %.1053, %.1052 + %.1054 =l add %.533, 892 + %.1055 =w copy 2523405358 + storew %.1055, %.1054 + %.1056 =l add %.533, 896 + %.1057 =w copy 3252988231 + storew %.1057, %.1056 + %.1058 =l add %.533, 900 + %.1059 =w copy 1 + storew %.1059, %.1058 + %.1060 =l add %.533, 904 + %.1061 =w copy 2004438502 + storew %.1061, %.1060 + %.1062 =l add %.533, 908 + %.1063 =l extsw 0 + %.1064 =l sub %.1063, 1 + %.1065 =w copy %.1064 + storew %.1065, %.1062 + %.1066 =l add %.533, 912 + %.1067 =w copy 3252988231 + storew %.1067, %.1066 + %.1068 =l add %.533, 916 + %.1069 =w copy 2523405358 + storew %.1069, %.1068 + %.1070 =l add %.533, 920 + %.1071 =w copy 0 + storew %.1071, %.1070 + %.1072 =l add %.533, 924 + %.1073 =w copy 3252988231 + storew %.1073, %.1072 + %.1074 =l add %.533, 928 + %.1075 =l extsw 0 + %.1076 =l sub %.1075, 9 + %.1077 =w copy %.1076 + storew %.1077, %.1074 + %.1078 =l add %.533, 932 + %.1079 =w copy 0 + storew %.1079, %.1078 + %.1080 =l add %.533, 936 + %.1081 =w copy 2108666265 + storew %.1081, %.1080 + %.1082 =l add %.533, 940 + %.1083 =l extsw 0 + %.1084 =l sub %.1083, 1 + %.1085 =w copy %.1084 + storew %.1085, %.1082 + %.1086 =l add %.533, 944 + %.1087 =w copy 5 + storew %.1087, %.1086 + %.1088 =l add %.533, 948 + %.1089 =w copy 1101784401 + storew %.1089, %.1088 + %.1090 =l add %.533, 952 + %.1091 =w copy 2523405358 + storew %.1091, %.1090 + %.1092 =l add %.533, 956 + %.1093 =l extsw 0 + %.1094 =l sub %.1093, 1 + %.1095 =w copy %.1094 + storew %.1095, %.1092 + %.1096 =l add %.533, 960 + %.1097 =w copy 1 + storew %.1097, %.1096 + %.1098 =l add %.533, 964 + %.1099 =w copy 1101784401 + storew %.1099, %.1098 + %.1100 =l add %.533, 968 + %.1101 =w copy 1101784401 + storew %.1101, %.1100 + %.1102 =l add %.533, 972 + %.1103 =w copy 1 + 
storew %.1103, %.1102 + %.1104 =l add %.533, 976 + %.1105 =w copy 2657468036 + storew %.1105, %.1104 + %.1107 =l add %.1106, 0 + %.1108 =w copy 1 + storew %.1108, %.1107 + %.1110 =l add %.1109, 0 + %.1111 =l extsw 0 + %.1112 =l copy %.1111 + storel %.1112, %.1110 + %.1114 =l add %.1113, 0 + storel %.1109, %.1114 + %.1116 =l add %.1115, 0 + storel $g_130, %.1116 + %.1118 =l add %.1117, 0 + %.1119 =w copy 3 + storew %.1119, %.1118 + %.1121 =l add %.1120, 0 + %.1122 =w copy 67 + storeb %.1122, %.1121 + %.1126 =w copy 25 + %.1127 =l copy $g_518 + %.1128 =l mul 32, 1 + %.1129 =l add %.1127, %.1128 + %.1130 =l copy %.1129 + storew %.1126, %.1130 +@for_cond.1081 + %.1131 =l copy $g_518 + %.1132 =l mul 32, 1 + %.1133 =l add %.1131, %.1132 + %.1134 =l copy %.1133 + %.1135 =w loaduw %.1134 + %.1136 =w copy 43 + %.1137 =w cnew %.1135, %.1136 + jnz %.1137, @for_body.1082, @for_join.1084 +@for_body.1082 + %.1139 =l add %.1138, 0 + %.1140 =w copy 0 + storew %.1140, %.1139 + %.1141 =l add %.1138, 4 + %.1142 =w copy 0 + storew %.1142, %.1141 + %.1143 =l add %.1138, 8 + %.1144 =w copy 0 + storew %.1144, %.1143 + %.1145 =l add %.1138, 12 + %.1146 =w copy 0 + storew %.1146, %.1145 + %.1147 =l add %.1138, 16 + %.1148 =w copy 0 + storew %.1148, %.1147 + %.1149 =l add %.1138, 20 + %.1150 =w copy 0 + storew %.1150, %.1149 + %.1151 =l add %.1138, 24 + %.1152 =w copy 0 + storew %.1152, %.1151 + %.1153 =l add %.1138, 28 + %.1154 =w copy 0 + storew %.1154, %.1153 + %.1155 =l add %.1138, 32 + %.1156 =w copy 0 + storew %.1156, %.1155 + %.1158 =l loadl $g_23 + %.1159 =w loadsw %.1158 + %.1160 =l extsw 6 + %.1161 =l mul %.1160, 4 + %.1162 =l add %.1138, %.1161 + %.1163 =w loadsw %.1162 + %.1164 =w and %.1159, %.1163 + storew %.1164, %.1158 +@for_cont.1083 + %.1165 =l copy $g_518 + %.1166 =l mul 32, 1 + %.1167 =l add %.1165, %.1166 + %.1168 =l copy %.1167 + %.1169 =w loaduw %.1168 + %.1170 =l extuw %.1169 + %.1171 =l extsw 2 + %.1172 =l call $safe_add_func_uint64_t_u_u(l %.1170, l %.1171) + 
%.1173 =w copy %.1172 + %.1174 =l copy $g_518 + %.1175 =l mul 32, 1 + %.1176 =l add %.1174, %.1175 + %.1177 =l copy %.1176 + storew %.1173, %.1177 + jmp @for_cond.1081 +@for_join.1084 + %.1178 =w loadub %.500 + %.1179 =w sub %.1178, 1 + storeb %.1179, %.500 + %.1180 =w copy 27 + %.1181 =l copy %.89 + %.1182 =l mul 8, 1 + %.1183 =l add %.1181, %.1182 + %.1184 =l copy %.1183 + storeh %.1180, %.1184 +@for_cond.1085 + %.1185 =l copy %.89 + %.1186 =l mul 8, 1 + %.1187 =l add %.1185, %.1186 + %.1188 =l copy %.1187 + %.1189 =w loadsh %.1188 + %.1190 =w extsh %.1189 + %.1191 =w sub 0, 12 + %.1192 =w cslew %.1190, %.1191 + jnz %.1192, @for_body.1086, @for_join.1088 +@for_body.1086 + %.1194 =l add %.1193, 0 + %.1195 =l extsw 0 + %.1196 =l sub %.1195, 3 + %.1197 =l copy %.1196 + storel %.1197, %.1194 + %.1199 =l add %.1198, 0 + %.1200 =w copy 1 + storew %.1200, %.1199 + %.1201 =l add %.1198, 4 + %.1202 =w copy 1 + storew %.1202, %.1201 + %.1203 =l add %.1198, 8 + %.1204 =w copy 3909724799 + storew %.1204, %.1203 + %.1205 =l add %.1198, 12 + %.1206 =w copy 1 + storew %.1206, %.1205 + %.1207 =l add %.1198, 16 + %.1208 =w copy 1 + storew %.1208, %.1207 + %.1209 =l add %.1198, 20 + %.1210 =w copy 3909724799 + storew %.1210, %.1209 + %.1212 =l add %.1211, 0 + storel %.531, %.1212 + %.1214 =l add %.1213, 0 + storel %.1109, %.1214 + %.1216 =w copy 0 + storew %.1216, $g_84 +@for_cond.1089 + %.1217 =w loaduw $g_84 + %.1218 =w copy 0 + %.1219 =w culew %.1217, %.1218 + jnz %.1219, @for_body.1090, @for_join.1092 +@for_body.1090 + %.1221 =l add %.1220, 0 + %.1222 =w copy 9 + storeb %.1222, %.1221 + %.1223 =l add %.1220, 1 + %.1224 =w copy 21 + storeb %.1224, %.1223 + %.1225 =l add %.1220, 2 + %.1226 =w copy 1 + storeb %.1226, %.1225 + %.1227 =l add %.1220, 3 + %.1228 =w copy 1 + storeb %.1228, %.1227 + %.1229 =l add %.1220, 4 + %.1230 =w copy 1 + storeb %.1230, %.1229 + %.1231 =l add %.1220, 5 + %.1232 =w copy 21 + storeb %.1232, %.1231 + %.1233 =l add %.1220, 6 + %.1234 =w copy 9 + 
storeb %.1234, %.1233 + %.1235 =l add %.1220, 7 + %.1236 =w copy 70 + storeb %.1236, %.1235 + %.1237 =l add %.1220, 8 + %.1238 =w copy 5 + storeb %.1238, %.1237 + %.1239 =l add %.1220, 9 + %.1240 =w copy 70 + storeb %.1240, %.1239 + %.1241 =l add %.1220, 10 + %.1242 =w copy 9 + storeb %.1242, %.1241 + %.1243 =l add %.1220, 11 + %.1244 =w copy 21 + storeb %.1244, %.1243 + %.1245 =l add %.1220, 12 + %.1246 =w copy 1 + storeb %.1246, %.1245 + %.1247 =l add %.1220, 13 + %.1248 =w copy 1 + storeb %.1248, %.1247 + %.1249 =l add %.1220, 14 + %.1250 =w copy 1 + storeb %.1250, %.1249 + %.1251 =l add %.1220, 15 + %.1252 =w copy 21 + storeb %.1252, %.1251 + %.1253 =l add %.1220, 16 + %.1254 =w copy 9 + storeb %.1254, %.1253 + %.1255 =l add %.1220, 17 + %.1256 =w copy 70 + storeb %.1256, %.1255 + %.1257 =l add %.1220, 18 + %.1258 =w copy 5 + storeb %.1258, %.1257 + %.1259 =l add %.1220, 19 + %.1260 =w copy 70 + storeb %.1260, %.1259 + %.1262 =l add %.1261, 0 + storel $g_50, %.1262 + %.1264 =l add %.1263, 0 + %.1265 =w copy 2636067377 + storew %.1265, %.1264 + %.1267 =l add %.1266, 0 + %.1268 =w copy 1 + storew %.1268, %.1267 + %.1270 =l add %.1269, 0 + %.1271 =w copy 3650403282 + storew %.1271, %.1270 + %.1273 =l add %.1272, 0 + %.1274 =w copy 6 + storew %.1274, %.1273 + %.1277 =l add %.1276, 0 + storel $g_58, %.1277 + %.1279 =l add %.1278, 0 + %.1280 =l copy $g_185 + %.1281 =l mul 8, 1 + %.1282 =l add %.1280, %.1281 + %.1283 =l copy %.1282 + storel %.1283, %.1279 + %.1285 =l add %.1284, 0 + %.1286 =w copy 7 + storeb %.1286, %.1285 + %.1288 =l add %.1287, 0 + storel $g_81, %.1288 + %.1290 =l add %.1289, 0 + storel %.5, %.1290 + %.1292 =l add %.1291, 0 + %.1293 =w copy 54 + storeb %.1293, %.1292 + %.1294 =l add %.1291, 1 + storeb 0, %.1294 + %.1295 =l add %.1291, 2 + storeh 0, %.1295 + %.1296 =l add %.1291, 4 + storew 0, %.1296 + %.1297 =l add %.1291, 8 + %.1298 =l extsw 0 + %.1299 =l sub %.1298, 7 + %.1300 =l copy %.1299 + storel %.1300, %.1297 + %.1301 =l add %.1291, 16 + 
%.1302 =w copy 1 + storew %.1302, %.1301 + %.1303 =l add %.1291, 20 + storew 0, %.1303 + %.1304 =l add %.1291, 24 + storel 11604192345489365348, %.1304 + %.1305 =l add %.1291, 32 + %.1306 =w copy 9 + storew %.1306, %.1305 + %.1307 =l add %.1291, 36 + %.1308 =w copy 0 + storew %.1308, %.1307 + %.1309 =l add %.1291, 40 + %.1310 =w copy 3737664569 + storew %.1310, %.1309 + %.1311 =l add %.1291, 44 + %.1312 =w copy 2923809832 + storew %.1312, %.1311 + %.1313 =l add %.1291, 48 + %.1314 =w copy 0 + storew %.1314, %.1313 + %.1315 =l add %.1291, 52 + storew 0, %.1315 + storew 0, %.1316 +@for_cond.1093 + %.1319 =w loadsw %.1316 + %.1320 =w csltw %.1319, 3 + jnz %.1320, @for_body.1094, @for_join.1096 +@for_body.1094 + %.1321 =w copy 4 + %.1322 =w loadsw %.1316 + %.1323 =l extsw %.1322 + %.1324 =l mul %.1323, 4 + %.1325 =l add %.1275, %.1324 + storew %.1321, %.1325 +@for_cont.1095 + %.1326 =w loadsw %.1316 + %.1327 =w add %.1326, 1 + storew %.1327, %.1316 + jmp @for_cond.1093 +@for_join.1096 + %.1328 =w copy 0 + %.1329 =l copy $g_130 + %.1330 =l mul 8, 1 + %.1331 =l add %.1329, %.1330 + %.1332 =l copy %.1331 + storeh %.1328, %.1332 +@for_cond.1097 + %.1333 =l copy $g_130 + %.1334 =l mul 8, 1 + %.1335 =l add %.1333, %.1334 + %.1336 =l copy %.1335 + %.1337 =w loadsh %.1336 + %.1338 =w extsh %.1337 + %.1339 =w csgew %.1338, 0 + jnz %.1339, @for_body.1098, @for_join.1100 +@for_body.1098 + %.1340 =l copy %.10 + %.1341 =l mul 48, 1 + %.1342 =l add %.1340, %.1341 + %.1343 =l copy %.1342 + storew 0, %.1343 +@for_cond.1101 + %.1344 =l copy %.10 + %.1345 =l mul 48, 1 + %.1346 =l add %.1344, %.1345 + %.1347 =l copy %.1346 + %.1348 =w loadsw %.1347 + %.1349 =w cslew %.1348, 0 + jnz %.1349, @for_body.1102, @for_join.1104 +@for_body.1102 + %.1351 =l add %.1350, 0 + storel $g_794, %.1351 + storel %.2, %.4 + %.1352 =l loadl %.1350 + %.1353 =l loaduw %.89 + storew %.1353, %.1352 + %.1354 =l add %.89, 4 + %.1355 =l add %.1352, 4 + %.1356 =l loaduw %.1354 + storew %.1356, %.1355 + %.1357 =l add 
%.1354, 4 + %.1358 =l add %.1355, 4 + %.1359 =l loaduw %.1357 + storew %.1359, %.1358 + %.1360 =l add %.1357, 4 + %.1361 =l add %.1358, 4 + %.1362 =l loaduw %.1360 + storew %.1362, %.1361 + %.1363 =l add %.1360, 4 + %.1364 =l add %.1361, 4 + %.1365 =l loaduw %.1363 + storew %.1365, %.1364 + %.1366 =l add %.1363, 4 + %.1367 =l add %.1364, 4 +@for_cont.1103 + %.1368 =l copy %.10 + %.1369 =l mul 48, 1 + %.1370 =l add %.1368, %.1369 + %.1371 =l copy %.1370 + %.1372 =w loadsw %.1371 + %.1373 =w add %.1372, 1 + storew %.1373, %.1371 + jmp @for_cond.1101 +@for_join.1104 + %.1374 =l extsw 0 + storel %.1374, $g_82 +@for_cond.1105 + %.1375 =l loadl $g_82 + %.1376 =l extsw 1 + %.1377 =w cslel %.1375, %.1376 + jnz %.1377, @for_body.1106, @for_join.1108 +@for_body.1106 + %.1379 =w copy 6 + %.1380 =l loadl %.4 + storew %.1379, %.1380 + %.1381 =l extsw 2 + %.1382 =l mul %.1381, 2 + %.1383 =l add %.1220, %.1382 + %.1384 =l extsw 0 + %.1385 =l mul %.1384, 1 + %.1386 =l add %.1383, %.1385 + %.1387 =l extsw 0 + %.1388 =l mul %.1387, 1 + %.1389 =l add %.1386, %.1388 + %.1390 =w loadsb %.1389 + %.1391 =w extsb %.1390 + ret %.1391 +@for_cont.1107 + %.1392 =l loadl $g_82 + %.1393 =l extsw 1 + %.1394 =l add %.1392, %.1393 + storel %.1394, $g_82 + jmp @for_cond.1105 +@for_join.1108 + %.1395 =l copy $g_794 + %.1396 =l mul 12, 1 + %.1397 =l add %.1395, %.1396 + %.1398 =l copy %.1397 + storew 0, %.1398 +@for_cond.1109 + %.1399 =l copy $g_794 + %.1400 =l mul 12, 1 + %.1401 =l add %.1399, %.1400 + %.1402 =l copy %.1401 + %.1403 =w loadsw %.1402 + %.1404 =w cslew %.1403, 0 + jnz %.1404, @for_body.1110, @for_join.1112 +@for_body.1110 + %.1406 =l add %.1405, 0 + storel %.1261, %.1406 + %.1408 =l add %.1407, 0 + storel $g_23, %.1408 + %.1409 =l add %.1407, 8 + storel $g_173, %.1409 + %.1410 =l add %.1407, 16 + storel $g_173, %.1410 + %.1411 =l add %.1407, 24 + storel $g_23, %.1411 + %.1412 =l add %.1407, 32 + %.1413 =l extsw 0 + %.1414 =l copy %.1413 + storel %.1414, %.1412 + %.1415 =l add %.1407, 
40 + storel $g_23, %.1415 + %.1416 =l add %.1407, 48 + storel $g_23, %.1416 + %.1417 =l add %.1407, 56 + storel $g_173, %.1417 + %.1418 =l add %.1407, 64 + storel $g_173, %.1418 + %.1419 =l add %.1407, 72 + storel $g_23, %.1419 + %.1420 =l add %.1407, 80 + %.1421 =l extsw 0 + %.1422 =l copy %.1421 + storel %.1422, %.1420 + %.1423 =l add %.1407, 88 + storel $g_23, %.1423 + %.1424 =l add %.1407, 96 + storel $g_173, %.1424 + %.1425 =l add %.1407, 104 + storel $g_173, %.1425 + %.1426 =l add %.1407, 112 + storel $g_173, %.1426 + %.1427 =l add %.1407, 120 + storel $g_23, %.1427 + %.1428 =l add %.1407, 128 + %.1429 =l extsw 0 + %.1430 =l copy %.1429 + storel %.1430, %.1428 + %.1431 =l add %.1407, 136 + storel $g_23, %.1431 + %.1432 =l add %.1407, 144 + storel $g_173, %.1432 + %.1433 =l add %.1407, 152 + storel $g_173, %.1433 + %.1434 =l add %.1407, 160 + storel $g_23, %.1434 + %.1435 =l add %.1407, 168 + storel $g_23, %.1435 + %.1436 =l add %.1407, 176 + storel $g_173, %.1436 + %.1437 =l add %.1407, 184 + storel $g_23, %.1437 + %.1438 =l add %.1407, 192 + storel $g_23, %.1438 + %.1439 =l add %.1407, 200 + storel $g_23, %.1439 + %.1440 =l add %.1407, 208 + storel $g_23, %.1440 + %.1441 =l add %.1407, 216 + storel $g_173, %.1441 + %.1442 =l add %.1407, 224 + storel $g_23, %.1442 + %.1443 =l add %.1407, 232 + storel $g_173, %.1443 + %.1444 =l add %.1407, 240 + %.1445 =l extsw 0 + %.1446 =l copy %.1445 + storel %.1446, %.1444 + %.1447 =l add %.1407, 248 + %.1448 =l extsw 0 + %.1449 =l copy %.1448 + storel %.1449, %.1447 + %.1450 =l add %.1407, 256 + storel $g_173, %.1450 + %.1451 =l add %.1407, 264 + storel $g_23, %.1451 + %.1452 =l add %.1407, 272 + storel $g_173, %.1452 + %.1453 =l add %.1407, 280 + storel $g_23, %.1453 + %.1454 =l add %.1407, 288 + storel $g_23, %.1454 + %.1455 =l add %.1407, 296 + storel $g_23, %.1455 + %.1456 =l add %.1407, 304 + storel $g_23, %.1456 + %.1457 =l add %.1407, 312 + storel $g_173, %.1457 + %.1458 =l add %.1407, 320 + storel $g_23, %.1458 + 
%.1459 =l add %.1407, 328 + storel $g_23, %.1459 + %.1460 =l add %.1407, 336 + storel $g_173, %.1460 + %.1461 =l add %.1407, 344 + storel $g_173, %.1461 + %.1462 =l add %.1407, 352 + storel $g_23, %.1462 + %.1463 =l add %.1407, 360 + %.1464 =l extsw 0 + %.1465 =l copy %.1464 + storel %.1465, %.1463 + %.1466 =l add %.1407, 368 + storel $g_23, %.1466 + %.1467 =l add %.1407, 376 + storel $g_173, %.1467 + %.1468 =l add %.1407, 384 + storel $g_173, %.1468 + %.1469 =l add %.1407, 392 + storel $g_173, %.1469 + %.1470 =l add %.1407, 400 + storel $g_23, %.1470 + %.1471 =l add %.1407, 408 + %.1472 =l extsw 0 + %.1473 =l copy %.1472 + storel %.1473, %.1471 + %.1474 =l add %.1407, 416 + storel $g_23, %.1474 + %.1475 =l add %.1407, 424 + storel $g_173, %.1475 + %.1476 =l add %.1407, 432 + storel $g_173, %.1476 + %.1477 =l add %.1407, 440 + storel $g_23, %.1477 + %.1478 =l add %.1407, 448 + storel $g_23, %.1478 + %.1479 =l add %.1407, 456 + storel $g_173, %.1479 + %.1480 =l add %.1407, 464 + storel $g_23, %.1480 + %.1481 =l add %.1407, 472 + storel $g_23, %.1481 + %.1482 =l add %.1407, 480 + storel $g_23, %.1482 + %.1483 =l add %.1407, 488 + storel $g_23, %.1483 + %.1484 =l add %.1407, 496 + storel $g_173, %.1484 + %.1485 =l add %.1407, 504 + storel $g_23, %.1485 + %.1486 =l add %.1407, 512 + storel $g_173, %.1486 + %.1487 =l add %.1407, 520 + %.1488 =l extsw 0 + %.1489 =l copy %.1488 + storel %.1489, %.1487 + %.1490 =l add %.1407, 528 + %.1491 =l extsw 0 + %.1492 =l copy %.1491 + storel %.1492, %.1490 + %.1493 =l add %.1407, 536 + storel $g_173, %.1493 + %.1494 =l add %.1407, 544 + storel $g_23, %.1494 + %.1495 =l add %.1407, 552 + storel $g_173, %.1495 + %.1496 =l add %.1407, 560 + storel $g_23, %.1496 + %.1497 =l add %.1407, 568 + storel $g_23, %.1497 + %.1498 =l add %.1407, 576 + storel $g_23, %.1498 + %.1499 =l add %.1407, 584 + storel $g_23, %.1499 + %.1500 =l add %.1407, 592 + storel $g_173, %.1500 + %.1501 =l add %.1407, 600 + storel $g_23, %.1501 + %.1502 =l add %.1407, 
608 + storel $g_23, %.1502 + %.1503 =l add %.1407, 616 + storel $g_173, %.1503 + %.1504 =l add %.1407, 624 + storel $g_173, %.1504 + %.1505 =l add %.1407, 632 + storel $g_23, %.1505 + %.1506 =l add %.1407, 640 + %.1507 =l extsw 0 + %.1508 =l copy %.1507 + storel %.1508, %.1506 + %.1509 =l add %.1407, 648 + storel $g_23, %.1509 + %.1510 =l add %.1407, 656 + storel $g_173, %.1510 + %.1511 =l add %.1407, 664 + storel $g_173, %.1511 + %.1512 =l add %.1407, 672 + storel $g_173, %.1512 + %.1513 =l add %.1407, 680 + storel $g_23, %.1513 + %.1514 =l add %.1407, 688 + %.1515 =l extsw 0 + %.1516 =l copy %.1515 + storel %.1516, %.1514 + %.1517 =l add %.1407, 696 + storel $g_23, %.1517 + %.1518 =l add %.1407, 704 + storel $g_173, %.1518 + %.1519 =l add %.1407, 712 + storel $g_173, %.1519 + %.1520 =l add %.1407, 720 + storel $g_23, %.1520 + %.1521 =l add %.1407, 728 + storel $g_23, %.1521 + %.1522 =l add %.1407, 736 + storel $g_173, %.1522 + %.1523 =l add %.1407, 744 + storel $g_23, %.1523 + %.1524 =l add %.1407, 752 + storel $g_23, %.1524 + %.1525 =l add %.1407, 760 + storel $g_23, %.1525 + %.1526 =l add %.1407, 768 + storel $g_23, %.1526 + %.1527 =l add %.1407, 776 + storel $g_173, %.1527 + %.1528 =l add %.1407, 784 + storel $g_23, %.1528 + %.1529 =l add %.1407, 792 + storel $g_173, %.1529 + %.1530 =l add %.1407, 800 + %.1531 =l extsw 0 + %.1532 =l copy %.1531 + storel %.1532, %.1530 + %.1533 =l add %.1407, 808 + %.1534 =l extsw 0 + %.1535 =l copy %.1534 + storel %.1535, %.1533 + %.1536 =l add %.1407, 816 + storel $g_173, %.1536 + %.1537 =l add %.1407, 824 + storel $g_23, %.1537 + %.1538 =l add %.1407, 832 + storel $g_173, %.1538 + %.1539 =l add %.1407, 840 + storel $g_23, %.1539 + %.1540 =l add %.1407, 848 + storel $g_23, %.1540 + %.1541 =l add %.1407, 856 + storel $g_23, %.1541 + %.1542 =l add %.1407, 864 + storel $g_23, %.1542 + %.1543 =l add %.1407, 872 + storel $g_173, %.1543 + %.1544 =l add %.1407, 880 + storel $g_23, %.1544 + %.1545 =l add %.1407, 888 + storel $g_23, 
%.1545 + %.1546 =l add %.1407, 896 + storel $g_173, %.1546 + %.1547 =l add %.1407, 904 + storel $g_173, %.1547 + %.1548 =l add %.1407, 912 + storel $g_23, %.1548 + %.1549 =l add %.1407, 920 + %.1550 =l extsw 0 + %.1551 =l copy %.1550 + storel %.1551, %.1549 + %.1552 =l add %.1407, 928 + storel $g_23, %.1552 + %.1553 =l add %.1407, 936 + storel $g_173, %.1553 + %.1554 =l add %.1407, 944 + storel $g_173, %.1554 + %.1555 =l add %.1407, 952 + storel $g_173, %.1555 + %.1556 =l add %.1407, 960 + storel $g_23, %.1556 + %.1557 =l add %.1407, 968 + %.1558 =l extsw 0 + %.1559 =l copy %.1558 + storel %.1559, %.1557 + %.1560 =l add %.1407, 976 + storel $g_23, %.1560 + %.1561 =l add %.1407, 984 + storel $g_173, %.1561 + %.1562 =l add %.1407, 992 + storel $g_173, %.1562 + %.1563 =l add %.1407, 1000 + storel $g_23, %.1563 + %.1565 =l add %.1564, 0 + %.1566 =l extsw 0 + %.1567 =l copy %.1566 + storel %.1567, %.1565 + %.1569 =l add %.1568, 0 + storel %.1564, %.1569 + %.1571 =l add %.1570, 0 + storel %.1568, %.1571 + %.1573 =l add %.1572, 0 + storel %.1564, %.1573 + %.1575 =l add %.1574, 0 + storel %.1572, %.1575 + %.1579 =l loadl %.1261 + %.1580 =l loadl %.1405 + storel %.1579, %.1580 + storel %.1579, %.4 + %.1581 =l loadl $g_173 + %.1582 =w loadsw %.1581 + %.1583 =w loadsw %.101 + %.1584 =w copy %.1583 + %.1585 =w call $safe_lshift_func_int16_t_s_s(w %.1584, w 9) + %.1586 =w copy %.1585 + %.1587 =w loadsw %.2 + %.1588 =w cnew %.1587, 0 + jnz %.1588, @logic_right.1113, @logic_join.1114 +@logic_right.1113 + %.1589 =l copy $g_265 + %.1590 =l mul 32, 1 + %.1591 =l add %.1589, %.1590 + %.1592 =l copy %.1591 + %.1593 =w loaduw %.1592 + %.1594 =w add %.1593, 1 + storew %.1594, %.1592 + %.1595 =l loadl %.503 + %.1596 =l extsw 0 + %.1597 =w ceql %.1595, %.1596 + %.1598 =w copy %.1597 + %.1599 =w call $safe_rshift_func_int16_t_s_s(w %.1598, w 11) + %.1600 =w extsh %.1599 + %.1601 =l loadl %.4 + %.1602 =w loadsw %.1601 + %.1603 =l loadl $g_88 + %.1604 =l loadl %.1603 + %.1605 =l loadl %.1604 
+ storew %.1602, %.1605 + %.1606 =l loadl %.1570 + storel $g_422, %.1606 + %.1607 =l loadl %.1574 + storel $g_422, %.1607 + %.1608 =l loadl %.505 + %.1609 =w ceql $g_422, %.1608 + %.1610 =w copy %.1609 + %.1611 =w call $safe_lshift_func_int16_t_s_s(w %.1610, w 7) + %.1612 =w extsh %.1611 + %.1613 =w cnew %.1612, 0 + jnz %.1613, @logic_right.1115, @logic_join.1116 +@logic_right.1115 + %.1614 =l loadl $g_399 + %.1615 =w copy %.1614 + %.1616 =w loaduh $g_425 + %.1617 =w extuh %.1616 + %.1618 =w cnew %.1617, 0 + jnz %.1618, @logic_right.1117, @logic_join.1118 +@logic_right.1117 + %.1619 =l loadl %.4 + %.1620 =w loadsw %.1619 + %.1621 =w cnew %.1620, 0 +@logic_join.1118 + %.1622 =w phi @logic_right.1115 %.1618, @logic_right.1117 %.1621 + %.1623 =w copy %.1622 + %.1624 =w call $safe_sub_func_int8_t_s_s(w %.1615, w %.1623) + %.1625 =w extsb %.1624 + %.1626 =w cnew %.1625, 0 +@logic_join.1116 + %.1627 =w phi @logic_right.1113 %.1613, @logic_join.1118 %.1626 + %.1628 =w call $safe_add_func_int32_t_s_s(w %.1602, w %.1627) + %.1629 =l copy %.10 + %.1630 =l mul 40, 1 + %.1631 =l add %.1629, %.1630 + %.1632 =l copy %.1631 + %.1633 =w loadsw %.1632 + %.1634 =w copy %.1633 + %.1635 =w call $safe_sub_func_uint32_t_u_u(w %.1600, w %.1634) + %.1636 =w loadsw %.2 + %.1637 =w copy %.1636 + %.1638 =w xor %.1635, %.1637 + %.1639 =w call $safe_add_func_uint32_t_u_u(w %.1593, w %.1638) + %.1640 =w cnew %.1639, 0 +@logic_join.1114 + %.1641 =w phi @for_body.1110 %.1588, @logic_join.1116 %.1640 + %.1642 =l extsw %.1641 + %.1643 =w cnel %.1642, 394305013 + %.1644 =w copy %.1643 + %.1645 =w loadsb %.509 + %.1646 =w extsb %.1645 + %.1647 =w call $safe_mul_func_uint16_t_u_u(w %.1644, w %.1646) + %.1648 =w copy %.1647 + %.1649 =w call $safe_sub_func_int8_t_s_s(w %.1586, w %.1648) + %.1650 =w extsb %.1649 + %.1651 =w xor %.1582, %.1650 + storew %.1651, %.1581 +@for_cont.1111 + %.1652 =l copy $g_794 + %.1653 =l mul 12, 1 + %.1654 =l add %.1652, %.1653 + %.1655 =l copy %.1654 + %.1656 =w loadsw 
%.1655 + %.1657 =w add %.1656, 1 + storew %.1657, %.1655 + jmp @for_cond.1109 +@for_join.1112 + %.1658 =l copy $g_185 + %.1659 =l mul 16, 1 + %.1660 =l add %.1658, %.1659 + %.1661 =l copy %.1660 + storew 0, %.1661 +@for_cond.1119 + %.1662 =l copy $g_185 + %.1663 =l mul 16, 1 + %.1664 =l add %.1662, %.1663 + %.1665 =l copy %.1664 + %.1666 =w loadsw %.1665 + %.1667 =w csgew %.1666, 0 + jnz %.1667, @for_body.1120, @for_join.1122 +@for_body.1120 + %.1669 =l add %.1668, 0 + %.1670 =l extsw 0 + %.1671 =l sub %.1670, 4 + %.1672 =w copy %.1671 + storew %.1672, %.1669 + %.1674 =l add %.1673, 0 + storel $g_662, %.1674 + %.1676 =l add %.1675, 0 + storel %.1673, %.1676 + %.1678 =l add %.1677, 0 + storel %.531, %.1678 + %.1680 =l add %.1679, 0 + storel %.509, %.1680 + %.1682 =l add %.1681, 0 + storel $g_629, %.1682 + %.1683 =l add %.1681, 8 + %.1684 =l extsw 1 + %.1685 =l mul %.1684, 1 + %.1686 =l add $g_132, %.1685 + storel %.1686, %.1683 + %.1687 =l add %.1681, 16 + storel $g_629, %.1687 + %.1688 =l add %.1681, 24 + storel $g_629, %.1688 + %.1689 =l add %.1681, 32 + %.1690 =l extsw 1 + %.1691 =l mul %.1690, 1 + %.1692 =l add $g_132, %.1691 + storel %.1692, %.1689 + %.1693 =l add %.1681, 40 + storel $g_629, %.1693 + %.1694 =l add %.1681, 48 + storel $g_629, %.1694 + %.1695 =l add %.1681, 56 + %.1696 =l extsw 1 + %.1697 =l mul %.1696, 1 + %.1698 =l add $g_132, %.1697 + storel %.1698, %.1695 + %.1699 =l add %.1681, 64 + storel $g_629, %.1699 + %.1701 =l add %.1700, 0 + %.1702 =l extsw 2 + %.1703 =l mul %.1702, 24 + %.1704 =l add %.512, %.1703 + %.1705 =l extsw 0 + %.1706 =l mul %.1705, 24 + %.1707 =l add %.1704, %.1706 + %.1708 =l extsw 0 + %.1709 =l mul %.1708, 8 + %.1710 =l add %.1707, %.1709 + storel %.1710, %.1701 + %.1712 =l add %.1711, 0 + storel $g_130, %.1712 + %.1714 =l add %.1713, 0 + storel %.1711, %.1714 + %.1716 =l add %.1715, 0 + storel $g_794, %.1716 + %.1718 =l add %.1717, 0 + storel %.1715, %.1718 + %.1720 =l extsw 2 + %.1721 =l mul %.1720, 24 + %.1722 =l add 
%.512, %.1721 + %.1723 =l extsw 0 + %.1724 =l mul %.1723, 24 + %.1725 =l add %.1722, %.1724 + %.1726 =l extsw 0 + %.1727 =l mul %.1726, 8 + %.1728 =l add %.1725, %.1727 + storel $g_130, %.1728 + %.1729 =w cnel $g_130, $g_130 + %.1730 =l loadl %.4 + %.1731 =w loadsw %.1730 + %.1732 =l extsw 0 + %.1733 =w cnel %.84, %.1732 + %.1734 =w cnew %.1733, 0 + jnz %.1734, @logic_join.1124, @logic_right.1123 +@logic_right.1123 + %.1735 =w loadsw %.1668 + %.1736 =w loadsw %.2 + %.1737 =w copy %.1736 + %.1738 =l loadl %.1675 + storel $g_662, %.1738 + %.1739 =l loadl %.531 + %.1740 =l loadl %.1677 + storel %.1739, %.1740 + %.1741 =w cnel $g_662, %.1739 + %.1742 =w copy %.1741 + %.1743 =w call $safe_mul_func_uint16_t_u_u(w %.1737, w %.1742) + %.1744 =w extuh %.1743 + %.1745 =w csgew %.1735, %.1744 + %.1746 =w cnew %.1745, 0 +@logic_join.1124 + %.1747 =w phi @for_body.1120 %.1734, @logic_right.1123 %.1746 + %.1748 =w copy %.1747 + %.1749 =l loadl %.1679 + storeb %.1748, %.1749 + %.1750 =w loadub %.500 + %.1751 =w copy %.1750 + %.1752 =w call $safe_add_func_int8_t_s_s(w %.1748, w %.1751) + %.1753 =w extsb %.1752 + %.1754 =l copy %.10 + %.1755 =l mul 40, 1 + %.1756 =l add %.1754, %.1755 + %.1757 =l copy %.1756 + storew %.1753, %.1757 + %.1758 =w copy %.1753 + %.1759 =w copy 12 + %.1760 =w call $safe_sub_func_int8_t_s_s(w %.1758, w %.1759) + %.1761 =w extsb %.1760 + %.1762 =w copy 441 + %.1763 =w call $safe_mul_func_int16_t_s_s(w %.1761, w %.1762) + %.1764 =w copy %.1763 + %.1765 =w loadsw %.2 + %.1766 =w copy %.1765 + %.1767 =w call $safe_lshift_func_int8_t_s_u(w %.1764, w %.1766) + %.1768 =w extsb %.1767 + %.1769 =w csltw %.1731, %.1768 + %.1770 =w and %.1729, %.1769 + %.1771 =l copy %.89 + %.1772 =l mul 0, 1 + %.1773 =l add %.1771, %.1772 + %.1774 =l copy %.1773 + storew %.1770, %.1774 + %.1775 =l loadl %.4 + storew %.1770, %.1775 + %.1776 =l extsw 0 + %.1777 =l copy %.1776 + %.1778 =l loadl %.1700 + storel %.1777, %.1778 + %.1779 =l loadl %.1713 + storel %.1777, %.1779 + %.1780 =l 
loadl %.1717 + storel %.1777, %.1780 + %.1781 =l loadl $g_173 + %.1782 =w loadsw %.1781 + %.1783 =l loadl %.4 + storew %.1782, %.1783 +@for_cont.1121 + %.1784 =l copy $g_185 + %.1785 =l mul 16, 1 + %.1786 =l add %.1784, %.1785 + %.1787 =l copy %.1786 + %.1788 =w loadsw %.1787 + %.1789 =w sub %.1788, 1 + storew %.1789, %.1787 + jmp @for_cond.1119 +@for_join.1122 +@for_cont.1099 + %.1790 =l copy $g_130 + %.1791 =l mul 8, 1 + %.1792 =l add %.1790, %.1791 + %.1793 =l copy %.1792 + %.1794 =w loadsh %.1793 + %.1795 =w extsh %.1794 + %.1796 =w sub %.1795, 1 + %.1797 =w copy %.1796 + storeh %.1797, %.1793 + jmp @for_cond.1097 +@for_join.1100 + %.1798 =l loadl $g_38 + %.1799 =l loadl %.1798 + %.1800 =w loadsw %.1799 + %.1801 =l loadl %.1261 + %.1802 =w loadsw %.1801 + %.1803 =w copy %.1802 + %.1804 =w loadub %.500 + %.1805 =w extub %.1804 + %.1806 =w call $safe_mul_func_int16_t_s_s(w %.1803, w %.1805) + %.1807 =w extsh %.1806 + %.1808 =w or %.1800, %.1807 + storew %.1808, %.1799 + %.1809 =l loadl %.1261 + storew %.1808, %.1809 + %.1810 =w cnew %.1808, 0 + jnz %.1810, @if_true.1125, @if_false.1126 +@if_true.1125 + %.1812 =l add %.1811, 0 + %.1813 =l extsw 0 + %.1814 =l copy %.1813 + storel %.1814, %.1812 + %.1816 =l add %.1815, 0 + %.1817 =l extsw 0 + %.1818 =l copy %.1817 + storel %.1818, %.1816 + %.1820 =l add %.1819, 0 + %.1821 =l extsw 0 + %.1822 =l copy %.1821 + storel %.1822, %.1820 + %.1823 =l add %.1819, 8 + %.1824 =l extsw 0 + %.1825 =l copy %.1824 + storel %.1825, %.1823 + %.1826 =l add %.1819, 16 + %.1827 =l extsw 0 + %.1828 =l copy %.1827 + storel %.1828, %.1826 + %.1829 =l add %.1819, 24 + %.1830 =l extsw 0 + %.1831 =l copy %.1830 + storel %.1831, %.1829 + %.1832 =l add %.1819, 32 + %.1833 =l extsw 0 + %.1834 =l copy %.1833 + storel %.1834, %.1832 + %.1835 =l add %.1819, 40 + %.1836 =l extsw 0 + %.1837 =l copy %.1836 + storel %.1837, %.1835 + %.1838 =l add %.1819, 48 + %.1839 =l extsw 0 + %.1840 =l copy %.1839 + storel %.1840, %.1838 + %.1841 =l add %.1819, 56 + 
%.1842 =l copy $g_518 + %.1843 =l mul 44, 1 + %.1844 =l add %.1842, %.1843 + %.1845 =l copy %.1844 + storel %.1845, %.1841 + %.1846 =l add %.1819, 64 + %.1847 =l copy %.10 + %.1848 =l mul 40, 1 + %.1849 =l add %.1847, %.1848 + %.1850 =l copy %.1849 + storel %.1850, %.1846 + %.1851 =l add %.1819, 72 + %.1852 =l copy $g_518 + %.1853 =l mul 44, 1 + %.1854 =l add %.1852, %.1853 + %.1855 =l copy %.1854 + storel %.1855, %.1851 + %.1856 =l add %.1819, 80 + %.1857 =l copy %.10 + %.1858 =l mul 40, 1 + %.1859 =l add %.1857, %.1858 + %.1860 =l copy %.1859 + storel %.1860, %.1856 + %.1861 =l add %.1819, 88 + %.1862 =l copy $g_518 + %.1863 =l mul 44, 1 + %.1864 =l add %.1862, %.1863 + %.1865 =l copy %.1864 + storel %.1865, %.1861 + %.1866 =l add %.1819, 96 + %.1867 =l copy %.10 + %.1868 =l mul 40, 1 + %.1869 =l add %.1867, %.1868 + %.1870 =l copy %.1869 + storel %.1870, %.1866 + %.1871 =l add %.1819, 104 + %.1872 =l copy $g_518 + %.1873 =l mul 44, 1 + %.1874 =l add %.1872, %.1873 + %.1875 =l copy %.1874 + storel %.1875, %.1871 + %.1876 =l add %.1819, 112 + %.1877 =l extsw 0 + %.1878 =l copy %.1877 + storel %.1878, %.1876 + %.1879 =l add %.1819, 120 + %.1880 =l extsw 0 + %.1881 =l copy %.1880 + storel %.1881, %.1879 + %.1882 =l add %.1819, 128 + %.1883 =l extsw 0 + %.1884 =l copy %.1883 + storel %.1884, %.1882 + %.1885 =l add %.1819, 136 + %.1886 =l extsw 0 + %.1887 =l copy %.1886 + storel %.1887, %.1885 + %.1888 =l add %.1819, 144 + %.1889 =l extsw 0 + %.1890 =l copy %.1889 + storel %.1890, %.1888 + %.1891 =l add %.1819, 152 + %.1892 =l extsw 0 + %.1893 =l copy %.1892 + storel %.1893, %.1891 + %.1894 =l add %.1819, 160 + %.1895 =l extsw 0 + %.1896 =l copy %.1895 + storel %.1896, %.1894 + %.1897 =l add %.1819, 168 + %.1898 =l copy $g_518 + %.1899 =l mul 44, 1 + %.1900 =l add %.1898, %.1899 + %.1901 =l copy %.1900 + storel %.1901, %.1897 + %.1902 =l add %.1819, 176 + %.1903 =l copy %.10 + %.1904 =l mul 40, 1 + %.1905 =l add %.1903, %.1904 + %.1906 =l copy %.1905 + storel %.1906, 
%.1902 + %.1907 =l add %.1819, 184 + %.1908 =l copy $g_518 + %.1909 =l mul 44, 1 + %.1910 =l add %.1908, %.1909 + %.1911 =l copy %.1910 + storel %.1911, %.1907 + %.1912 =l add %.1819, 192 + %.1913 =l copy %.10 + %.1914 =l mul 40, 1 + %.1915 =l add %.1913, %.1914 + %.1916 =l copy %.1915 + storel %.1916, %.1912 + %.1917 =l add %.1819, 200 + %.1918 =l copy $g_518 + %.1919 =l mul 44, 1 + %.1920 =l add %.1918, %.1919 + %.1921 =l copy %.1920 + storel %.1921, %.1917 + %.1922 =l add %.1819, 208 + %.1923 =l copy %.10 + %.1924 =l mul 40, 1 + %.1925 =l add %.1923, %.1924 + %.1926 =l copy %.1925 + storel %.1926, %.1922 + %.1927 =l add %.1819, 216 + %.1928 =l copy $g_518 + %.1929 =l mul 44, 1 + %.1930 =l add %.1928, %.1929 + %.1931 =l copy %.1930 + storel %.1931, %.1927 + %.1932 =l add %.1819, 224 + %.1933 =l extsw 0 + %.1934 =l copy %.1933 + storel %.1934, %.1932 + %.1935 =l add %.1819, 232 + %.1936 =l extsw 0 + %.1937 =l copy %.1936 + storel %.1937, %.1935 + %.1938 =l add %.1819, 240 + %.1939 =l extsw 0 + %.1940 =l copy %.1939 + storel %.1940, %.1938 + %.1941 =l add %.1819, 248 + %.1942 =l extsw 0 + %.1943 =l copy %.1942 + storel %.1943, %.1941 + %.1944 =l add %.1819, 256 + %.1945 =l extsw 0 + %.1946 =l copy %.1945 + storel %.1946, %.1944 + %.1947 =l add %.1819, 264 + %.1948 =l extsw 0 + %.1949 =l copy %.1948 + storel %.1949, %.1947 + %.1950 =l add %.1819, 272 + %.1951 =l extsw 0 + %.1952 =l copy %.1951 + storel %.1952, %.1950 + %.1953 =l add %.1819, 280 + %.1954 =l copy $g_518 + %.1955 =l mul 44, 1 + %.1956 =l add %.1954, %.1955 + %.1957 =l copy %.1956 + storel %.1957, %.1953 + %.1958 =l add %.1819, 288 + %.1959 =l copy %.10 + %.1960 =l mul 40, 1 + %.1961 =l add %.1959, %.1960 + %.1962 =l copy %.1961 + storel %.1962, %.1958 + %.1963 =l add %.1819, 296 + %.1964 =l copy $g_518 + %.1965 =l mul 44, 1 + %.1966 =l add %.1964, %.1965 + %.1967 =l copy %.1966 + storel %.1967, %.1963 + %.1968 =l add %.1819, 304 + %.1969 =l copy %.10 + %.1970 =l mul 40, 1 + %.1971 =l add %.1969, %.1970 
+ %.1972 =l copy %.1971 + storel %.1972, %.1968 + %.1973 =l add %.1819, 312 + %.1974 =l copy $g_518 + %.1975 =l mul 44, 1 + %.1976 =l add %.1974, %.1975 + %.1977 =l copy %.1976 + storel %.1977, %.1973 + %.1978 =l add %.1819, 320 + %.1979 =l copy %.10 + %.1980 =l mul 40, 1 + %.1981 =l add %.1979, %.1980 + %.1982 =l copy %.1981 + storel %.1982, %.1978 + %.1983 =l add %.1819, 328 + %.1984 =l copy $g_518 + %.1985 =l mul 44, 1 + %.1986 =l add %.1984, %.1985 + %.1987 =l copy %.1986 + storel %.1987, %.1983 + %.1988 =l add %.1819, 336 + %.1989 =l extsw 0 + %.1990 =l copy %.1989 + storel %.1990, %.1988 + %.1991 =l add %.1819, 344 + %.1992 =l extsw 0 + %.1993 =l copy %.1992 + storel %.1993, %.1991 + %.1994 =l add %.1819, 352 + %.1995 =l extsw 0 + %.1996 =l copy %.1995 + storel %.1996, %.1994 + %.1997 =l add %.1819, 360 + %.1998 =l extsw 0 + %.1999 =l copy %.1998 + storel %.1999, %.1997 + %.2000 =l add %.1819, 368 + %.2001 =l extsw 0 + %.2002 =l copy %.2001 + storel %.2002, %.2000 + %.2003 =l add %.1819, 376 + %.2004 =l extsw 0 + %.2005 =l copy %.2004 + storel %.2005, %.2003 + %.2006 =l add %.1819, 384 + %.2007 =l extsw 0 + %.2008 =l copy %.2007 + storel %.2008, %.2006 + %.2009 =l add %.1819, 392 + %.2010 =l copy $g_518 + %.2011 =l mul 44, 1 + %.2012 =l add %.2010, %.2011 + %.2013 =l copy %.2012 + storel %.2013, %.2009 + %.2014 =l add %.1819, 400 + %.2015 =l copy %.10 + %.2016 =l mul 40, 1 + %.2017 =l add %.2015, %.2016 + %.2018 =l copy %.2017 + storel %.2018, %.2014 + %.2019 =l add %.1819, 408 + %.2020 =l copy $g_518 + %.2021 =l mul 44, 1 + %.2022 =l add %.2020, %.2021 + %.2023 =l copy %.2022 + storel %.2023, %.2019 + %.2024 =l add %.1819, 416 + %.2025 =l copy %.10 + %.2026 =l mul 40, 1 + %.2027 =l add %.2025, %.2026 + %.2028 =l copy %.2027 + storel %.2028, %.2024 + %.2029 =l add %.1819, 424 + %.2030 =l copy $g_518 + %.2031 =l mul 44, 1 + %.2032 =l add %.2030, %.2031 + %.2033 =l copy %.2032 + storel %.2033, %.2029 + %.2034 =l add %.1819, 432 + %.2035 =l copy %.10 + %.2036 =l 
mul 40, 1 + %.2037 =l add %.2035, %.2036 + %.2038 =l copy %.2037 + storel %.2038, %.2034 + %.2039 =l add %.1819, 440 + %.2040 =l copy $g_518 + %.2041 =l mul 44, 1 + %.2042 =l add %.2040, %.2041 + %.2043 =l copy %.2042 + storel %.2043, %.2039 + %.2044 =l add %.1819, 448 + %.2045 =l extsw 0 + %.2046 =l copy %.2045 + storel %.2046, %.2044 + %.2047 =l add %.1819, 456 + %.2048 =l extsw 0 + %.2049 =l copy %.2048 + storel %.2049, %.2047 + %.2050 =l add %.1819, 464 + %.2051 =l extsw 0 + %.2052 =l copy %.2051 + storel %.2052, %.2050 + %.2053 =l add %.1819, 472 + %.2054 =l extsw 0 + %.2055 =l copy %.2054 + storel %.2055, %.2053 + %.2056 =l add %.1819, 480 + %.2057 =l extsw 0 + %.2058 =l copy %.2057 + storel %.2058, %.2056 + %.2059 =l add %.1819, 488 + %.2060 =l extsw 0 + %.2061 =l copy %.2060 + storel %.2061, %.2059 + %.2062 =l add %.1819, 496 + %.2063 =l extsw 0 + %.2064 =l copy %.2063 + storel %.2064, %.2062 + %.2065 =l add %.1819, 504 + %.2066 =l copy $g_518 + %.2067 =l mul 44, 1 + %.2068 =l add %.2066, %.2067 + %.2069 =l copy %.2068 + storel %.2069, %.2065 + %.2070 =l add %.1819, 512 + %.2071 =l copy %.10 + %.2072 =l mul 40, 1 + %.2073 =l add %.2071, %.2072 + %.2074 =l copy %.2073 + storel %.2074, %.2070 + %.2075 =l add %.1819, 520 + %.2076 =l copy $g_518 + %.2077 =l mul 44, 1 + %.2078 =l add %.2076, %.2077 + %.2079 =l copy %.2078 + storel %.2079, %.2075 + %.2080 =l add %.1819, 528 + %.2081 =l copy %.10 + %.2082 =l mul 40, 1 + %.2083 =l add %.2081, %.2082 + %.2084 =l copy %.2083 + storel %.2084, %.2080 + %.2085 =l add %.1819, 536 + %.2086 =l copy $g_518 + %.2087 =l mul 44, 1 + %.2088 =l add %.2086, %.2087 + %.2089 =l copy %.2088 + storel %.2089, %.2085 + %.2090 =l add %.1819, 544 + %.2091 =l copy %.10 + %.2092 =l mul 40, 1 + %.2093 =l add %.2091, %.2092 + %.2094 =l copy %.2093 + storel %.2094, %.2090 + %.2095 =l add %.1819, 552 + %.2096 =l copy $g_518 + %.2097 =l mul 44, 1 + %.2098 =l add %.2096, %.2097 + %.2099 =l copy %.2098 + storel %.2099, %.2095 + %.2100 =l add 
%.1819, 560 + %.2101 =l extsw 0 + %.2102 =l copy %.2101 + storel %.2102, %.2100 + %.2103 =l add %.1819, 568 + %.2104 =l extsw 0 + %.2105 =l copy %.2104 + storel %.2105, %.2103 + %.2106 =l add %.1819, 576 + %.2107 =l extsw 0 + %.2108 =l copy %.2107 + storel %.2108, %.2106 + %.2109 =l add %.1819, 584 + %.2110 =l extsw 0 + %.2111 =l copy %.2110 + storel %.2111, %.2109 + %.2112 =l add %.1819, 592 + %.2113 =l extsw 0 + %.2114 =l copy %.2113 + storel %.2114, %.2112 + %.2115 =l add %.1819, 600 + %.2116 =l extsw 0 + %.2117 =l copy %.2116 + storel %.2117, %.2115 + %.2118 =l add %.1819, 608 + %.2119 =l extsw 0 + %.2120 =l copy %.2119 + storel %.2120, %.2118 + %.2121 =l add %.1819, 616 + %.2122 =l copy $g_518 + %.2123 =l mul 44, 1 + %.2124 =l add %.2122, %.2123 + %.2125 =l copy %.2124 + storel %.2125, %.2121 + %.2126 =l add %.1819, 624 + %.2127 =l copy %.10 + %.2128 =l mul 40, 1 + %.2129 =l add %.2127, %.2128 + %.2130 =l copy %.2129 + storel %.2130, %.2126 + %.2131 =l add %.1819, 632 + %.2132 =l copy $g_518 + %.2133 =l mul 44, 1 + %.2134 =l add %.2132, %.2133 + %.2135 =l copy %.2134 + storel %.2135, %.2131 + %.2136 =l add %.1819, 640 + %.2137 =l copy %.10 + %.2138 =l mul 40, 1 + %.2139 =l add %.2137, %.2138 + %.2140 =l copy %.2139 + storel %.2140, %.2136 + %.2141 =l add %.1819, 648 + %.2142 =l copy $g_518 + %.2143 =l mul 44, 1 + %.2144 =l add %.2142, %.2143 + %.2145 =l copy %.2144 + storel %.2145, %.2141 + %.2146 =l add %.1819, 656 + %.2147 =l copy %.10 + %.2148 =l mul 40, 1 + %.2149 =l add %.2147, %.2148 + %.2150 =l copy %.2149 + storel %.2150, %.2146 + %.2151 =l add %.1819, 664 + %.2152 =l copy $g_518 + %.2153 =l mul 44, 1 + %.2154 =l add %.2152, %.2153 + %.2155 =l copy %.2154 + storel %.2155, %.2151 + %.2156 =l add %.1819, 672 + %.2157 =l extsw 0 + %.2158 =l copy %.2157 + storel %.2158, %.2156 + %.2159 =l add %.1819, 680 + %.2160 =l extsw 0 + %.2161 =l copy %.2160 + storel %.2161, %.2159 + %.2162 =l add %.1819, 688 + %.2163 =l extsw 0 + %.2164 =l copy %.2163 + storel 
%.2164, %.2162 + %.2165 =l add %.1819, 696 + %.2166 =l extsw 0 + %.2167 =l copy %.2166 + storel %.2167, %.2165 + %.2168 =l add %.1819, 704 + %.2169 =l extsw 0 + %.2170 =l copy %.2169 + storel %.2170, %.2168 + %.2171 =l add %.1819, 712 + %.2172 =l extsw 0 + %.2173 =l copy %.2172 + storel %.2173, %.2171 + %.2174 =l add %.1819, 720 + %.2175 =l extsw 0 + %.2176 =l copy %.2175 + storel %.2176, %.2174 + %.2177 =l add %.1819, 728 + %.2178 =l copy $g_518 + %.2179 =l mul 44, 1 + %.2180 =l add %.2178, %.2179 + %.2181 =l copy %.2180 + storel %.2181, %.2177 + %.2182 =l add %.1819, 736 + %.2183 =l copy %.10 + %.2184 =l mul 40, 1 + %.2185 =l add %.2183, %.2184 + %.2186 =l copy %.2185 + storel %.2186, %.2182 + %.2187 =l add %.1819, 744 + %.2188 =l copy $g_518 + %.2189 =l mul 44, 1 + %.2190 =l add %.2188, %.2189 + %.2191 =l copy %.2190 + storel %.2191, %.2187 + %.2192 =l add %.1819, 752 + %.2193 =l copy %.10 + %.2194 =l mul 40, 1 + %.2195 =l add %.2193, %.2194 + %.2196 =l copy %.2195 + storel %.2196, %.2192 + %.2197 =l add %.1819, 760 + %.2198 =l copy $g_518 + %.2199 =l mul 44, 1 + %.2200 =l add %.2198, %.2199 + %.2201 =l copy %.2200 + storel %.2201, %.2197 + %.2202 =l add %.1819, 768 + %.2203 =l copy %.10 + %.2204 =l mul 40, 1 + %.2205 =l add %.2203, %.2204 + %.2206 =l copy %.2205 + storel %.2206, %.2202 + %.2207 =l add %.1819, 776 + %.2208 =l copy $g_518 + %.2209 =l mul 44, 1 + %.2210 =l add %.2208, %.2209 + %.2211 =l copy %.2210 + storel %.2211, %.2207 + %.2213 =l add %.2212, 0 + %.2214 =l extsw 0 + %.2215 =l sub %.2214, 1 + %.2216 =w copy %.2215 + storeh %.2216, %.2213 + %.2220 =l loadl %.35 + %.2221 =l loadl %.2220 + %.2222 =l loadl %.35 + %.2223 =l loadl %.2222 + %.2224 =l loadl %.2221 + storel %.2224, %.2223 + %.2225 =l add %.2221, 8 + %.2226 =l add %.2223, 8 + %.2227 =l loadl %.2225 + storel %.2227, %.2226 + %.2228 =l add %.2225, 8 + %.2229 =l add %.2226, 8 + %.2230 =l loadl %.2228 + storel %.2230, %.2229 + %.2231 =l add %.2228, 8 + %.2232 =l add %.2229, 8 + %.2233 =l 
loadl %.2231 + storel %.2233, %.2232 + %.2234 =l add %.2231, 8 + %.2235 =l add %.2232, 8 + %.2236 =l loadl %.2234 + storel %.2236, %.2235 + %.2237 =l add %.2234, 8 + %.2238 =l add %.2235, 8 + %.2239 =l loadl %.2237 + storel %.2239, %.2238 + %.2240 =l add %.2237, 8 + %.2241 =l add %.2238, 8 + %.2242 =l loadl %.2240 + storel %.2242, %.2241 + %.2243 =l add %.2240, 8 + %.2244 =l add %.2241, 8 + %.2245 =l extsw 0 + %.2246 =l mul %.2245, 8 + %.2247 =l add %.109, %.2246 + %.2248 =l loadl %.2247 + %.2249 =l add %.2248, 1 + storel %.2249, %.2247 + storew 0, $g_24 +@for_cond.1127 + %.2250 =w loadsw $g_24 + %.2251 =w csgew %.2250, 0 + jnz %.2251, @for_body.1128, @for_join.1130 +@for_body.1128 + %.2252 =w loadsw %.2 + %.2253 =w copy %.2252 + ret %.2253 +@for_cont.1129 + %.2254 =w loadsw $g_24 + %.2255 =w sub %.2254, 1 + storew %.2255, $g_24 + jmp @for_cond.1127 +@for_join.1130 + %.2256 =w loadsw %.1106 + %.2257 =w copy 1 + %.2258 =w call $safe_lshift_func_uint8_t_u_s(w %.2257, w 3) + %.2259 =w extub %.2258 + %.2260 =w loadsw %.2 + %.2261 =w copy %.2260 + %.2262 =l loadl %.37 + storeh %.2261, %.2262 + %.2263 =w extuh %.2261 + %.2264 =l loadl %.1261 + storew %.2263, %.2264 + %.2265 =l extsw 0 + %.2266 =l mul %.2265, 140 + %.2267 =l add %.533, %.2266 + %.2268 =l extsw 3 + %.2269 =l mul %.2268, 20 + %.2270 =l add %.2267, %.2269 + %.2271 =l extsw 3 + %.2272 =l mul %.2271, 4 + %.2273 =l add %.2270, %.2272 + %.2274 =w loadsw %.2273 + %.2275 =l extsw %.2274 + %.2276 =l loadl %.1193 + %.2277 =l and %.2275, %.2276 + %.2278 =w copy %.2277 + storew %.2278, %.2273 + %.2279 =w copy %.2278 + %.2280 =w loadsw %.2 + %.2281 =w loadub %.500 + %.2282 =w extub %.2281 + %.2283 =w call $safe_add_func_uint16_t_u_u(w %.2279, w %.2282) + %.2284 =w extuh %.2283 + %.2285 =w or %.2263, %.2284 + %.2286 =w copy %.2285 + %.2287 =w call $safe_add_func_uint32_t_u_u(w %.2259, w %.2286) + %.2288 =w copy %.2287 + %.2289 =w loadsw %.2 + %.2290 =l loadl %.1211 + %.2291 =l extsw 0 + %.2292 =w ceql %.2290, %.2291 + 
%.2293 =l loadl $g_38 + %.2294 =l loadl %.2293 + %.2295 =w loadsw %.2294 + %.2296 =l loadl $g_23 + storew %.2295, %.2296 + %.2297 =w xor %.2292, %.2295 + %.2298 =w csltw %.2289, %.2297 + %.2299 =l extsw %.2298 + %.2300 =l and %.2299, 38184 + %.2301 =w copy %.2300 + %.2302 =w call $safe_mod_func_int16_t_s_s(w %.2288, w %.2301) + %.2303 =w copy %.2302 + %.2304 =w loadsw %.2 + %.2305 =w copy %.2304 + %.2306 =w call $safe_mul_func_uint16_t_u_u(w %.2303, w %.2305) + %.2307 =w extuh %.2306 + %.2308 =w xor %.2256, %.2307 + storew %.2308, %.1106 + jmp @if_join.1131 +@if_false.1126 + %.2310 =l add %.2309, 0 + %.2311 =l copy $g_265 + %.2312 =l mul 0, 1 + %.2313 =l add %.2311, %.2312 + %.2314 =l copy %.2313 + storel %.2314, %.2310 + %.2315 =l add %.2309, 8 + storel %.500, %.2315 + %.2316 =l add %.2309, 16 + storel %.500, %.2316 + %.2317 =l add %.2309, 24 + storel %.500, %.2317 + %.2318 =l add %.2309, 32 + storel %.500, %.2318 + %.2319 =l add %.2309, 40 + %.2320 =l copy $g_265 + %.2321 =l mul 0, 1 + %.2322 =l add %.2320, %.2321 + %.2323 =l copy %.2322 + storel %.2323, %.2319 + %.2324 =l add %.2309, 48 + storel %.500, %.2324 + %.2325 =l add %.2309, 56 + storel %.500, %.2325 + %.2326 =l add %.2309, 64 + storel %.500, %.2326 + %.2327 =l add %.2309, 72 + storel %.500, %.2327 + %.2329 =l add %.2328, 0 + %.2330 =l extsw 0 + %.2331 =l sub %.2330, 8 + %.2332 =w copy %.2331 + storew %.2332, %.2329 + %.2334 =l add %.2333, 0 + %.2335 =w copy 5 + storew %.2335, %.2334 + %.2337 =l loadl $g_173 + %.2338 =w loadsw %.2337 + %.2339 =w copy 1881345186 + %.2340 =l loadl $g_38 + %.2341 =l loadl %.2340 + %.2342 =w loadsw %.2341 + %.2343 =w call $safe_mod_func_int32_t_s_s(w %.2339, w %.2342) + %.2344 =w loadsb %.509 + %.2345 =w extsb %.2344 + %.2346 =w copy 1 + %.2347 =w call $safe_lshift_func_uint16_t_u_u(w %.2345, w %.2346) + %.2348 =w extuh %.2347 + storew %.2348, %.2328 + %.2349 =l extsw %.2348 + %.2350 =l copy %.10 + %.2351 =l mul 24, 1 + %.2352 =l add %.2350, %.2351 + %.2353 =l copy %.2352 + 
%.2354 =l loadl %.2353 + %.2355 =l and %.2349, %.2354 + %.2356 =w cnel %.2355, 0 + jnz %.2356, @logic_right.1134, @logic_join.1135 +@logic_right.1134 + %.2357 =l loadl $g_88 + %.2358 =l loadl %.2357 + %.2359 =w cnel %.2358, %.4 + %.2360 =w cnew %.2359, 0 +@logic_join.1135 + %.2361 =w phi @if_false.1126 %.2356, @logic_right.1134 %.2360 + %.2362 =w csgtw %.2343, %.2361 + %.2363 =l loadl %.87 + %.2364 =l loadl %.2363 + %.2365 =w loaduh %.2364 + %.2366 =w extuh %.2365 + %.2367 =l loadl $g_23 + %.2368 =w loadsw %.2367 + %.2369 =w loadsw %.2333 + %.2370 =w cslew %.2368, %.2369 + %.2371 =w cnew %.2370, 0 + jnz %.2371, @logic_join.1137, @logic_right.1136 +@logic_right.1136 + %.2372 =w loadsh $g_81 + %.2373 =w extsh %.2372 + %.2374 =w cnew %.2373, 0 +@logic_join.1137 + %.2375 =w phi @logic_join.1135 %.2371, @logic_right.1136 %.2374 + %.2376 =w copy %.2375 + %.2377 =w copy 97 + %.2378 =w call $safe_mul_func_int8_t_s_s(w %.2376, w %.2377) + %.2379 =w extsb %.2378 + %.2380 =w xor %.2366, %.2379 + %.2381 =w copy %.2380 + storeh %.2381, %.2364 + %.2382 =w extuh %.2381 + %.2383 =w and %.2362, %.2382 + %.2384 =l extsw %.2383 + %.2385 =w loadsb $g_631 + %.2386 =l extsb %.2385 + %.2387 =l call $safe_div_func_int64_t_s_s(l %.2384, l %.2386) + %.2388 =l copy $g_265 + %.2389 =l mul 48, 1 + %.2390 =l add %.2388, %.2389 + %.2391 =l copy %.2390 + %.2392 =w loadsw %.2391 + %.2393 =l extsw %.2392 + %.2394 =l extsw 0 + %.2395 =l mul %.2394, 140 + %.2396 =l add %.533, %.2395 + %.2397 =l extsw 4 + %.2398 =l mul %.2397, 20 + %.2399 =l add %.2396, %.2398 + %.2400 =l extsw 3 + %.2401 =l mul %.2400, 4 + %.2402 =l add %.2399, %.2401 + %.2403 =w loadsw %.2402 + %.2404 =l extsw %.2403 + %.2405 =l call $safe_div_func_uint64_t_u_u(l %.2393, l %.2404) + %.2406 =w cnel %.2405, 0 + jnz %.2406, @logic_join.1133, @logic_right.1132 +@logic_right.1132 + %.2407 =w loadsw %.2 + %.2408 =w cnew %.2407, 0 +@logic_join.1133 + %.2409 =w phi @logic_join.1137 %.2406, @logic_right.1132 %.2408 + %.2410 =w or %.2338, 
%.2409 + storew %.2410, %.2337 + %.2411 =w loadsw %.2 + %.2412 =w copy %.2411 + ret %.2412 +@if_join.1131 + %.2413 =l loadl %.1261 + %.2414 =w loadsw %.2413 + %.2415 =l copy $g_130 + %.2416 =l mul 8, 1 + %.2417 =l add %.2415, %.2416 + %.2418 =l copy %.2417 + %.2419 =w loadsh %.2418 + %.2420 =w extsh %.2419 + %.2421 =l extsw 0 + %.2422 =l mul %.2421, 140 + %.2423 =l add %.533, %.2422 + %.2424 =l extsw 4 + %.2425 =l mul %.2424, 20 + %.2426 =l add %.2423, %.2425 + %.2427 =l extsw 3 + %.2428 =l mul %.2427, 4 + %.2429 =l add %.2426, %.2428 + %.2430 =w loadsw %.2429 + %.2431 =w copy %.2430 + %.2432 =l loadl %.1289 + %.2433 =w loadsb %.2432 + %.2434 =w extsb %.2433 + %.2435 =w loadsw %.2 + %.2436 =w loadsw %.2 + %.2437 =l extsw %.2436 + %.2438 =l loadl %.1276 + storel %.2437, %.2438 + %.2439 =w loadsw %.1106 + %.2440 =w loadsw %.2 + %.2441 =w cnew %.2440, 0 + jnz %.2441, @logic_join.1141, @logic_right.1140 +@logic_right.1140 + %.2442 =l loadl %.1193 + %.2443 =w cnel %.2442, 0 +@logic_join.1141 + %.2444 =w phi @if_join.1131 %.2441, @logic_right.1140 %.2443 + %.2445 =l loadl $g_23 + storew %.2444, %.2445 + %.2446 =w cnew %.2444, 0 + jnz %.2446, @logic_join.1139, @logic_right.1138 +@logic_right.1138 + %.2447 =w loadsw %.2 + %.2448 =w copy %.2447 + %.2449 =l loadl %.1276 + %.2450 =l loadl %.1278 + %.2451 =w cnel %.2449, %.2450 + %.2452 =w copy %.2451 + %.2453 =l loadl %.4 + %.2454 =w loadsw %.2453 + %.2455 =w copy %.2454 + %.2456 =w call $safe_add_func_uint32_t_u_u(w %.2452, w %.2455) + %.2457 =w copy %.2456 + %.2458 =w loadsw %.2 + %.2459 =w copy %.2458 + %.2460 =w call $safe_add_func_uint16_t_u_u(w %.2457, w %.2459) + %.2461 =w copy %.2460 + %.2462 =w call $safe_sub_func_uint8_t_u_u(w %.2448, w %.2461) + %.2463 =w copy 65 + %.2464 =w call $safe_mul_func_uint8_t_u_u(w %.2462, w %.2463) + %.2465 =w cnel 9, 0 +@logic_join.1139 + %.2466 =w phi @logic_join.1141 %.2446, @logic_right.1138 %.2465 + %.2467 =w loadsb %.1284 + %.2468 =w extsb %.2467 + %.2469 =w cslew %.2466, %.2468 + 
%.2470 =w and %.2439, %.2469 + storew %.2470, %.1106 + %.2471 =l extsw %.2470 + %.2472 =l xor %.2437, %.2471 + %.2473 =w copy %.2472 + %.2474 =w copy 3978 + %.2475 =w call $safe_sub_func_int16_t_s_s(w %.2473, w %.2474) + %.2476 =l loadl %.1287 + storeh %.2475, %.2476 + %.2477 =w call $safe_rshift_func_int16_t_s_s(w %.2475, w 13) + %.2478 =w copy %.2477 + %.2479 =w copy 2 + %.2480 =w call $safe_rshift_func_uint8_t_u_u(w %.2478, w %.2479) + %.2481 =w extub %.2480 + %.2482 =l extsw 5 + %.2483 =l mul %.2482, 4 + %.2484 =l add %.1198, %.2483 + storew %.2481, %.2484 + %.2485 =w loadsw %.2 + %.2486 =w csltw %.2481, %.2485 + %.2487 =l extsw %.2486 + %.2488 =l copy 18446744073709551615 + %.2489 =l call $safe_div_func_uint64_t_u_u(l %.2487, l %.2488) + %.2490 =w loadsw %.2 + %.2491 =w copy %.2490 + %.2492 =l copy $g_518 + %.2493 =l mul 32, 1 + %.2494 =l add %.2492, %.2493 + %.2495 =l copy %.2494 + %.2496 =w loaduw %.2495 + %.2497 =w copy %.2496 + %.2498 =w call $safe_mul_func_int8_t_s_s(w %.2491, w %.2497) + %.2499 =w extsb %.2498 + %.2500 =w cnew %.2435, %.2499 + %.2501 =l extsw %.2500 + %.2502 =l extsw 0 + %.2503 =l sub %.2502, 1 + %.2504 =w ceql %.2501, %.2503 + %.2505 =w or %.2434, %.2504 + %.2506 =w copy %.2505 + storeb %.2506, %.2432 + %.2507 =w copy %.2506 + %.2508 =w call $safe_div_func_uint8_t_u_u(w %.2431, w %.2507) + %.2509 =w extub %.2508 + %.2510 =w loadsw %.2 + %.2511 =w csgtw %.2509, %.2510 + %.2512 =w copy %.2511 + %.2513 =w copy 6 + %.2514 =w call $safe_rshift_func_uint8_t_u_u(w %.2512, w %.2513) + %.2515 =w extub %.2514 + %.2516 =w loadsw %.2 + %.2517 =w copy %.2516 + %.2518 =w call $safe_mul_func_int16_t_s_s(w %.2515, w %.2517) + %.2519 =w extsh %.2518 + %.2520 =l loadl %.1261 + %.2521 =w loadsw %.2520 + %.2522 =w cslew %.2519, %.2521 + %.2523 =w or %.2420, %.2522 + %.2524 =l loadl $g_173 + storew %.2523, %.2524 + %.2525 =l extsw 0 + %.2526 =l sub %.2525, 6 + %.2527 =w copy %.2526 + %.2528 =l loadl $g_173 + storew %.2527, %.2528 + %.2529 =w copy 0 + %.2530 
=l copy $g_265 + %.2531 =l mul 32, 1 + %.2532 =l add %.2530, %.2531 + %.2533 =l copy %.2532 + storew %.2529, %.2533 +@for_cond.1142 + %.2534 =l copy $g_265 + %.2535 =l mul 32, 1 + %.2536 =l add %.2534, %.2535 + %.2537 =l copy %.2536 + %.2538 =w loaduw %.2537 + %.2539 =w copy 0 + %.2540 =w culew %.2538, %.2539 + jnz %.2540, @for_body.1143, @for_join.1145 +@for_body.1143 + %.2542 =l add %.2541, 0 + %.2543 =l copy $g_518 + %.2544 =l mul 44, 1 + %.2545 =l add %.2543, %.2544 + %.2546 =l copy %.2545 + storel %.2546, %.2542 + %.2548 =l add %.2547, 0 + storel $g_80, %.2548 + %.2550 =l add %.2549, 0 + %.2551 =w copy 4105257827 + storew %.2551, %.2550 + %.2553 =l add %.2552, 0 + %.2554 =l extsw 0 + %.2555 =l copy %.2554 + storel %.2555, %.2553 + %.2557 =l add %.2556, 0 + %.2558 =l copy 1 + storel %.2558, %.2557 + %.2560 =l add %.2559, 0 + storel %.531, %.2560 + %.2561 =w copy 0 + storeb %.2561, $g_631 +@for_cond.1146 + %.2562 =w loadsb $g_631 + %.2563 =w extsb %.2562 + %.2564 =w csgew %.2563, 0 + jnz %.2564, @for_body.1147, @for_join.1149 +@for_body.1147 + %.2566 =l add %.2565, 0 + storel $g_173, %.2566 + %.2568 =l add %.2567, 0 + storel %.1261, %.2568 + %.2570 =l add %.2569, 0 + %.2571 =w copy 1109499388 + storew %.2571, %.2570 + %.2574 =l add %.2573, 0 + %.2575 =l extsw 0 + %.2576 =l copy %.2575 + storel %.2576, %.2574 + %.2578 =l add %.2577, 0 + %.2579 =l copy %.1291 + %.2580 =l mul 36, 1 + %.2581 =l add %.2579, %.2580 + %.2582 =l copy %.2581 + storel %.2582, %.2578 + storew 0, %.2583 +@for_cond.1150 + %.2585 =w loadsw %.2583 + %.2586 =w csltw %.2585, 8 + jnz %.2586, @for_body.1151, @for_join.1153 +@for_body.1151 + storew 0, %.2584 +@for_cond.1154 + %.2587 =w loadsw %.2584 + %.2588 =w csltw %.2587, 6 + jnz %.2588, @for_body.1155, @for_join.1157 +@for_body.1155 + %.2589 =w loadsw %.2583 + %.2590 =l extsw %.2589 + %.2591 =l mul %.2590, 48 + %.2592 =l add %.2572, %.2591 + %.2593 =w loadsw %.2584 + %.2594 =l extsw %.2593 + %.2595 =l mul %.2594, 8 + %.2596 =l add %.2592, 
%.2595 + storel %.1113, %.2596 +@for_cont.1156 + %.2597 =w loadsw %.2584 + %.2598 =w add %.2597, 1 + storew %.2598, %.2584 + jmp @for_cond.1154 +@for_join.1157 +@for_cont.1152 + %.2599 =w loadsw %.2583 + %.2600 =w add %.2599, 1 + storew %.2600, %.2583 + jmp @for_cond.1150 +@for_join.1153 + %.2601 =l loadl %.2541 + %.2602 =l loadl %.2565 + storel %.2601, %.2602 + %.2603 =l loadl %.2567 + storel %.2601, %.2603 + %.2604 =l extsw 0 + %.2605 =l loadl %.2547 + %.2606 =w cnel %.2604, %.2605 + %.2607 =l copy $g_518 + %.2608 =l mul 0, 1 + %.2609 =l add %.2607, %.2608 + %.2610 =l copy %.2609 + %.2611 =w loadub %.2610 + %.2612 =w extub %.2611 + %.2613 =l loadl %.1289 + %.2614 =w loaduw $g_1018 + %.2615 =w copy %.2614 + %.2616 =w call $safe_lshift_func_uint8_t_u_s(w %.2615, w 3) + %.2617 =l extub %.2616 + %.2618 =w loadsw %.2 + %.2619 =w copy %.2618 + %.2620 =w loaduw %.2569 + %.2621 =w cultw %.2619, %.2620 + %.2622 =l extsw 0 + %.2623 =l mul %.2622, 140 + %.2624 =l add %.533, %.2623 + %.2625 =l extsw 4 + %.2626 =l mul %.2625, 20 + %.2627 =l add %.2624, %.2626 + %.2628 =l extsw 3 + %.2629 =l mul %.2628, 4 + %.2630 =l add %.2627, %.2629 + %.2631 =w loadsw %.2630 + %.2632 =w and %.2621, %.2631 + %.2633 =l extsw %.2632 + %.2634 =l or %.2633, 5192295408440469150 + %.2635 =l copy 1 + %.2636 =l or %.2634, %.2635 + %.2637 =l copy 0 + %.2638 =w cslel %.2636, %.2637 + %.2639 =l loadl %.2567 + %.2640 =l loadl %.2639 + storew %.2638, %.2640 + %.2641 =l extsw %.2638 + %.2642 =w loadsw %.2 + %.2643 =l extsw %.2642 + %.2644 =l call $safe_add_func_int64_t_s_s(l %.2641, l %.2643) + %.2645 =w copy %.2644 + %.2646 =l loadl $g_23 + storew %.2645, %.2646 + %.2647 =l extsw %.2645 + %.2648 =l extsw 0 + %.2649 =l sub %.2648, 1 + %.2650 =l and %.2647, %.2649 + %.2651 =l and 1, %.2650 + %.2652 =l xor %.2617, %.2651 + %.2653 =w loadsw %.2549 + %.2654 =l extsw %.2653 + %.2655 =l xor %.2652, %.2654 + %.2656 =w copy %.2655 + %.2657 =w copy 32877 + %.2658 =w call $safe_div_func_int16_t_s_s(w %.2656, w 
%.2657) + %.2659 =w ceql %.2613, %.500 + %.2660 =w copy %.2659 + %.2661 =l copy %.10 + %.2662 =l mul 36, 1 + %.2663 =l add %.2661, %.2662 + %.2664 =l copy %.2663 + %.2665 =w loaduw %.2664 + %.2666 =w copy %.2665 + %.2667 =w call $safe_mul_func_int16_t_s_s(w %.2660, w %.2666) + %.2668 =w extsh %.2667 + %.2669 =w cnew %.2668, 0 + jnz %.2669, @logic_right.1162, @logic_join.1163 +@logic_right.1162 + %.2670 =w loadsw %.2 + %.2671 =w cnew %.2670, 0 +@logic_join.1163 + %.2672 =w phi @for_join.1153 %.2669, @logic_right.1162 %.2671 + %.2673 =l extsw %.2672 + %.2674 =l copy 0 + %.2675 =l call $safe_sub_func_uint64_t_u_u(l %.2673, l %.2674) + %.2676 =l copy 0 + %.2677 =l xor %.2675, %.2676 + %.2678 =l copy %.2677 + %.2679 =l extsw 4 + %.2680 =l mul %.2679, 140 + %.2681 =l add %.533, %.2680 + %.2682 =l extsw 3 + %.2683 =l mul %.2682, 20 + %.2684 =l add %.2681, %.2683 + %.2685 =l extsw 4 + %.2686 =l mul %.2685, 4 + %.2687 =l add %.2684, %.2686 + %.2688 =w loadsw %.2687 + %.2689 =l extsw %.2688 + %.2690 =l call $safe_mod_func_int64_t_s_s(l %.2678, l %.2689) + %.2691 =w copy %.2690 + %.2692 =w call $safe_div_func_uint32_t_u_u(w %.2612, w %.2691) + %.2693 =w loadsw %.2 + %.2694 =w copy %.2693 + %.2695 =w cnew %.2692, %.2694 + %.2696 =l extsw %.2695 + %.2697 =l or 1, %.2696 + %.2698 =w copy %.2697 + %.2699 =l copy %.10 + %.2700 =l mul 40, 1 + %.2701 =l add %.2699, %.2700 + %.2702 =l copy %.2701 + %.2703 =w loadsw %.2702 + %.2704 =w copy %.2703 + %.2705 =w call $safe_add_func_uint8_t_u_u(w %.2698, w %.2704) + %.2706 =w copy %.2705 + %.2707 =l copy %.10 + %.2708 =l mul 48, 1 + %.2709 =l add %.2707, %.2708 + %.2710 =l copy %.2709 + %.2711 =w loadsw %.2710 + %.2712 =w copy %.2711 + %.2713 =w call $safe_sub_func_int8_t_s_s(w %.2706, w %.2712) + %.2714 =w extsb %.2713 + %.2715 =w cnew %.2714, 0 + jnz %.2715, @logic_join.1161, @logic_right.1160 +@logic_right.1160 + %.2716 =w cnel 63509, 0 +@logic_join.1161 + %.2717 =w phi @logic_join.1163 %.2715, @logic_right.1160 %.2716 + %.2718 =w 
loadsw %.2 + %.2719 =w and %.2717, %.2718 + %.2720 =w cnew %.2719, 0 + jnz %.2720, @logic_join.1159, @logic_right.1158 +@logic_right.1158 + %.2721 =l copy %.10 + %.2722 =l mul 8, 1 + %.2723 =l add %.2721, %.2722 + %.2724 =l copy %.2723 + %.2725 =l loadl %.2724 + %.2726 =w cnel %.2725, 0 +@logic_join.1159 + %.2727 =w phi @logic_join.1161 %.2720, @logic_right.1158 %.2726 + %.2728 =w xor %.2606, %.2727 + %.2729 =l extsw %.2728 + %.2730 =w cultl %.2729, 14106069369177510227 + %.2731 =w loadsw %.2 + %.2732 =w xor %.2730, %.2731 + storew %.2732, %.1272 + %.2733 =l loadl $g_23 + %.2734 =w loadsw %.2733 + %.2735 =l loadl %.1113 + storel %.2735, %.1213 + %.2736 =l extsw 0 + %.2737 =w cnel %.2735, %.2736 + %.2738 =l loadl %.1261 + %.2739 =w loadsw %.2738 + %.2740 =w csgew %.2737, %.2739 + %.2741 =l copy %.89 + %.2742 =l mul 8, 1 + %.2743 =l add %.2741, %.2742 + %.2744 =l copy %.2743 + %.2745 =w loadsh %.2744 + %.2746 =w cnel $g_2, %.5 + %.2747 =w cnew %.2746, 0 + jnz %.2747, @logic_join.1167, @logic_right.1166 +@logic_right.1166 + %.2748 =w cnel 0, 0 +@logic_join.1167 + %.2749 =w phi @logic_join.1159 %.2747, @logic_right.1166 %.2748 + %.2750 =w cnew %.2749, 0 + jnz %.2750, @logic_right.1164, @logic_join.1165 +@logic_right.1164 + %.2751 =w loadsw %.2 + %.2752 =w loadsw %.2 + %.2753 =w csltw %.2751, %.2752 + %.2754 =w loadsw %.2 + %.2755 =w copy %.2754 + %.2756 =w copy 6 + %.2757 =w call $safe_lshift_func_uint8_t_u_u(w %.2755, w %.2756) + %.2758 =w extub %.2757 + %.2759 =w cnew %.2758, 0 +@logic_join.1165 + %.2760 =w phi @logic_join.1167 %.2750, @logic_right.1164 %.2759 + %.2761 =l extsw %.2760 + %.2762 =l loadl %.1193 + %.2763 =w cslel %.2761, %.2762 + %.2764 =w copy %.2763 + %.2765 =w copy 58831 + %.2766 =w call $safe_mul_func_uint16_t_u_u(w %.2764, w %.2765) + %.2767 =w loadsw %.1106 + %.2768 =w copy %.2767 + %.2769 =w loadsw %.2 + %.2770 =w call $safe_lshift_func_uint8_t_u_s(w %.2768, w %.2769) + %.2771 =w copy %.2770 + %.2772 =l loadl %.1261 + %.2773 =w loadsw %.2772 + 
%.2774 =w copy %.2773 + %.2775 =w call $safe_lshift_func_int8_t_s_u(w %.2771, w %.2774) + %.2776 =w extsb %.2775 + %.2777 =w loadsw %.2 + %.2778 =w cslew %.2776, %.2777 + %.2779 =w and %.2734, %.2778 + storew %.2779, %.2733 + %.2780 =l loadl %.2573 + %.2781 =l loadl %.1115 + %.2782 =w cnel %.2780, %.2781 + %.2783 =l loadl %.1289 + %.2784 =w loadsb %.2783 + %.2785 =w extsb %.2784 + %.2786 =w loadsw %.2 + %.2787 =w or %.2785, %.2786 + %.2788 =w copy %.2787 + storeb %.2788, %.2783 + %.2789 =w extsb %.2788 + %.2790 =l loadl $g_1037 + %.2791 =l extsw 0 + %.2792 =w cnel %.2790, %.2791 + %.2793 =l loadl %.2552 + %.2794 =l loadl %.2565 + %.2795 =l loadl %.2794 + %.2796 =w loadsw %.2795 + %.2797 =l extsw %.2796 + %.2798 =l loadl %.87 + %.2799 =l loadl %.2798 + %.2800 =w loaduh %.2799 + %.2801 =l extuh %.2800 + %.2802 =w loadsw %.2 + %.2803 =l extsw %.2802 + %.2804 =l loadl %.1278 + %.2805 =l loadl %.2804 + %.2806 =l copy %.2805 + %.2807 =l copy $g_265 + %.2808 =l mul 48, 1 + %.2809 =l add %.2807, %.2808 + %.2810 =l copy %.2809 + %.2811 =w loadsw %.2810 + %.2812 =w copy %.2811 + %.2813 =w loadsw %.2 + %.2814 =w copy %.2813 + %.2815 =w call $safe_div_func_int8_t_s_s(w %.2812, w %.2814) + %.2816 =l copy 4 + %.2817 =w loadsw %.2 + %.2818 =l extsw %.2817 + %.2819 =l call $safe_add_func_uint64_t_u_u(l %.2816, l %.2818) + %.2820 =l and %.2806, %.2819 + %.2821 =l copy %.2820 + storel %.2821, %.2804 + %.2822 =l copy %.10 + %.2823 =l mul 16, 1 + %.2824 =l add %.2822, %.2823 + %.2825 =l copy %.2824 + %.2826 =w loadsw %.2825 + %.2827 =l extsw %.2826 + %.2828 =l call $safe_div_func_int64_t_s_s(l %.2821, l %.2827) + %.2829 =w copy %.2828 + %.2830 =w copy 203 + %.2831 =w call $safe_mul_func_int8_t_s_s(w %.2829, w %.2830) + %.2832 =w extsb %.2831 + %.2833 =w loadsw %.2 + %.2834 =w or %.2832, %.2833 + %.2835 =l extsw %.2834 + %.2836 =l loadl %.2547 + storel %.2835, %.2836 + %.2837 =l call $safe_add_func_int64_t_s_s(l %.2803, l %.2835) + %.2838 =l or %.2801, %.2837 + %.2839 =w copy %.2838 + 
storeh %.2839, %.2799 + %.2840 =l extuh %.2839 + %.2841 =l or %.2840, 65535 + %.2842 =w cnel %.2841, 0 + jnz %.2842, @logic_right.1172, @logic_join.1173 +@logic_right.1172 + %.2843 =w loadsw %.2 + %.2844 =w cnew %.2843, 0 +@logic_join.1173 + %.2845 =w phi @logic_join.1165 %.2842, @logic_right.1172 %.2844 + %.2846 =l extsw %.2845 + %.2847 =l call $safe_sub_func_int64_t_s_s(l %.2797, l %.2846) + %.2848 =w cnel 2224236467, 0 + jnz %.2848, @logic_right.1170, @logic_join.1171 +@logic_right.1170 + %.2849 =l loadl %.2565 + %.2850 =l loadl %.2849 + %.2851 =w loadsw %.2850 + %.2852 =w cnew %.2851, 0 +@logic_join.1171 + %.2853 =w phi @logic_join.1173 %.2848, @logic_right.1170 %.2852 + %.2854 =w copy %.2853 + %.2855 =l loadl %.2556 + %.2856 =w copy %.2855 + %.2857 =w call $safe_mul_func_uint16_t_u_u(w %.2854, w %.2856) + %.2858 =l loadl %.2565 + %.2859 =l loadl %.2858 + %.2860 =w loadsw %.2859 + %.2861 =w copy %.2860 + %.2862 =w call $safe_mul_func_uint16_t_u_u(w %.2857, w %.2861) + %.2863 =w copy %.2862 + %.2864 =w loadub $g_46 + %.2865 =w extub %.2864 + %.2866 =w call $safe_lshift_func_uint8_t_u_s(w %.2863, w %.2865) + %.2867 =w extub %.2866 + %.2868 =l loadl %.2577 + storew %.2867, %.2868 + %.2869 =l copy 1656972998 + %.2870 =w culel 4294967292, %.2869 + %.2871 =l or 214, 246 + %.2872 =w loadsw %.2 + %.2873 =l extsw %.2872 + %.2874 =w cnel %.2871, %.2873 + %.2875 =w loadsw %.1106 + %.2876 =w and %.2874, %.2875 + %.2877 =l extsw %.2876 + %.2878 =l xor %.2877, 3 + %.2879 =l loadl %.1289 + %.2880 =w ceql %.2793, %.2879 + %.2881 =w csgew %.2789, %.2880 + %.2882 =w cnew %.2881, 0 + jnz %.2882, @logic_right.1168, @logic_join.1169 +@logic_right.1168 + %.2883 =w loadsb %.509 + %.2884 =w extsb %.2883 + %.2885 =w cnew %.2884, 0 +@logic_join.1169 + %.2886 =w phi @logic_join.1171 %.2882, @logic_right.1168 %.2885 + %.2887 =l loadl %.4 + storew %.2886, %.2887 +@for_cont.1148 + %.2888 =w loadsb $g_631 + %.2889 =w extsb %.2888 + %.2890 =w sub %.2889, 1 + %.2891 =w copy %.2890 + storeb 
%.2891, $g_631 + jmp @for_cond.1146 +@for_join.1149 + %.2892 =w loadsw %.2 + %.2893 =w copy %.2892 + %.2894 =w call $safe_lshift_func_int16_t_s_s(w %.2893, w 10) + %.2895 =w extsh %.2894 + %.2896 =l loadl %.1261 + storew %.2895, %.2896 + %.2897 =w copy 0 + storeb %.2897, $g_566 +@for_cond.1174 + %.2898 =w loadub $g_566 + %.2899 =w extub %.2898 + %.2900 =w csltw %.2899, 8 + jnz %.2900, @for_body.1175, @for_join.1177 +@for_body.1175 + %.2901 =l extsw 0 + %.2902 =l copy %.2901 + %.2903 =w loadub $g_566 + %.2904 =l extub %.2903 + %.2905 =l mul %.2904, 8 + %.2906 =l add $g_364, %.2905 + storel %.2902, %.2906 +@for_cont.1176 + %.2907 =w loadub $g_566 + %.2908 =w extub %.2907 + %.2909 =w add %.2908, 1 + %.2910 =w copy %.2909 + storeb %.2910, $g_566 + jmp @for_cond.1174 +@for_join.1177 + %.2911 =l copy %.10 + %.2912 =l mul 48, 1 + %.2913 =l add %.2911, %.2912 + %.2914 =l copy %.2913 + storew 0, %.2914 +@for_cond.1178 + %.2915 =l copy %.10 + %.2916 =l mul 48, 1 + %.2917 =l add %.2915, %.2916 + %.2918 =l copy %.2917 + %.2919 =w loadsw %.2918 + %.2920 =w csgew %.2919, 0 + jnz %.2920, @for_body.1179, @for_join.1181 +@for_body.1179 + %.2922 =l add %.2921, 0 + %.2923 =l extsw 0 + %.2924 =l sub %.2923, 10 + %.2925 =w copy %.2924 + storeb %.2925, %.2922 + %.2926 =l loadl $g_88 + %.2927 =l loadl %.2926 + %.2928 =l loadl %.2927 + %.2929 =w loadsw %.2928 + %.2930 =w cnew %.2929, 0 + jnz %.2930, @if_true.1182, @if_false.1183 +@if_true.1182 + jmp @for_join.1181 +@if_false.1183 + %.2931 =l loadl $g_23 + %.2932 =w loadsw %.2931 + %.2933 =w cnew %.2932, 0 + jnz %.2933, @if_true.1184, @if_false.1185 +@if_true.1184 + jmp @for_cont.1180 +@if_false.1185 + %.2934 =l loadl %.4 + %.2935 =w loadsw %.2934 + %.2936 =w cnew %.2935, 0 + jnz %.2936, @if_true.1186, @if_false.1187 +@if_true.1186 + jmp @for_join.1181 +@if_false.1187 + %.2937 =l loadl $g_23 + %.2938 =w loadsw %.2937 + %.2939 =l copy $g_518 + %.2940 =l mul 48, 1 + %.2941 =l add %.2939, %.2940 + %.2942 =l copy %.2941 + %.2943 =w loadsw 
%.2942 + %.2944 =l copy %.10 + %.2945 =l mul 8, 1 + %.2946 =l add %.2944, %.2945 + %.2947 =l copy %.2946 + %.2948 =l loadl %.2947 + %.2949 =l loadl %.2547 + storel %.2948, %.2949 + %.2950 =l extsw 0 + %.2951 =l mul %.2950, 140 + %.2952 =l add %.533, %.2951 + %.2953 =l extsw 4 + %.2954 =l mul %.2953, 20 + %.2955 =l add %.2952, %.2954 + %.2956 =l extsw 3 + %.2957 =l mul %.2956, 4 + %.2958 =l add %.2955, %.2957 + %.2959 =w loadsw %.2958 + %.2960 =w copy %.2959 + %.2961 =w call $safe_lshift_func_int8_t_s_s(w %.2960, w 2) + %.2962 =l extsb %.2961 + %.2963 =w csltl %.2948, %.2962 + %.2964 =l loadl %.1261 + storew %.2963, %.2964 + %.2965 =w loadsw %.2 + %.2966 =l loadl %.2559 + %.2967 =l loadl $g_1069 + storel %.2967, $g_1069 + %.2968 =w cnel %.2966, %.2967 + %.2969 =w copy %.2968 + %.2970 =w loadsw %.2 + %.2971 =l extsw 0 + %.2972 =w ceql %.2, %.2971 + %.2973 =w ceqw %.2972, 0 + %.2974 =w copy %.2973 + %.2975 =w loadsw %.2 + %.2976 =w copy %.2975 + %.2977 =w call $safe_mul_func_int8_t_s_s(w %.2974, w %.2976) + %.2978 =w extsb %.2977 + %.2979 =w loadsw %.2 + %.2980 =w csltw %.2978, %.2979 + %.2981 =w copy %.2980 + %.2982 =w call $safe_sub_func_uint16_t_u_u(w %.2969, w %.2981) + %.2983 =l extuh %.2982 + %.2984 =w ceql 255, %.2983 + %.2985 =w loadsw %.2 + %.2986 =w or %.2965, %.2985 + %.2987 =w copy %.2986 + %.2988 =w copy 1 + %.2989 =w call $safe_add_func_uint8_t_u_u(w %.2987, w %.2988) + %.2990 =w extub %.2989 + %.2991 =w loadsb %.2921 + %.2992 =w extsb %.2991 + %.2993 =w cslew %.2990, %.2992 + %.2994 =w cnew %.2963, %.2993 + %.2995 =w loadsw %.2 + %.2996 =w xor %.2994, %.2995 + %.2997 =w loadsw %.2 + %.2998 =w ceqw %.2996, %.2997 + %.2999 =w xor %.2938, %.2998 + storew %.2999, %.2937 +@for_cont.1180 + %.3000 =l copy %.10 + %.3001 =l mul 48, 1 + %.3002 =l add %.3000, %.3001 + %.3003 =l copy %.3002 + %.3004 =w loadsw %.3003 + %.3005 =w sub %.3004, 1 + storew %.3005, %.3003 + jmp @for_cond.1178 +@for_join.1181 +@for_cont.1144 + %.3006 =l copy $g_265 + %.3007 =l mul 32, 1 + 
%.3008 =l add %.3006, %.3007 + %.3009 =l copy %.3008 + %.3010 =w loaduw %.3009 + %.3011 =w copy 1 + %.3012 =w add %.3010, %.3011 + storew %.3012, %.3009 + jmp @for_cond.1142 +@for_join.1145 +@for_cont.1091 + %.3013 =w loaduw $g_84 + %.3014 =w copy 1 + %.3015 =w add %.3013, %.3014 + storew %.3015, $g_84 + jmp @for_cond.1089 +@for_join.1092 + %.3016 =l extsw 0 + %.3017 =l copy $g_185 + %.3018 =l mul 24, 1 + %.3019 =l add %.3017, %.3018 + %.3020 =l copy %.3019 + storel %.3016, %.3020 +@for_cond.1188 + %.3021 =l copy $g_185 + %.3022 =l mul 24, 1 + %.3023 =l add %.3021, %.3022 + %.3024 =l copy %.3023 + %.3025 =l loadl %.3024 + %.3026 =l extsw 26 + %.3027 =w ceql %.3025, %.3026 + jnz %.3027, @for_body.1189, @for_join.1191 +@for_body.1189 + %.3029 =l add %.3028, 0 + %.3030 =l extsw 3 + %.3031 =l mul %.3030, 4 + %.3032 =l add %.1198, %.3031 + storel %.3032, %.3029 + %.3034 =l add %.3033, 0 + %.3035 =l extsw 0 + %.3036 =l copy %.3035 + storel %.3036, %.3034 + %.3038 =l add %.3037, 0 + %.3039 =l copy $g_185 + %.3040 =l mul 44, 1 + %.3041 =l add %.3039, %.3040 + %.3042 =l copy %.3041 + storel %.3042, %.3038 + %.3044 =l add %.3043, 0 + %.3045 =l extsw 0 + %.3046 =l copy %.3045 + storel %.3046, %.3044 + %.3048 =l add %.3047, 0 + %.3049 =l copy $g_518 + %.3050 =l mul 16, 1 + %.3051 =l add %.3049, %.3050 + %.3052 =l copy %.3051 + storel %.3052, %.3048 + %.3054 =l add %.3053, 0 + %.3055 =l copy $g_265 + %.3056 =l mul 48, 1 + %.3057 =l add %.3055, %.3056 + %.3058 =l copy %.3057 + storel %.3058, %.3054 + %.3060 =l add %.3059, 0 + %.3061 =l copy $g_185 + %.3062 =l mul 16, 1 + %.3063 =l add %.3061, %.3062 + %.3064 =l copy %.3063 + storel %.3064, %.3060 + %.3066 =l add %.3065, 0 + %.3067 =l extsw 0 + %.3068 =l mul %.3067, 140 + %.3069 =l add %.533, %.3068 + %.3070 =l extsw 4 + %.3071 =l mul %.3070, 20 + %.3072 =l add %.3069, %.3071 + %.3073 =l extsw 3 + %.3074 =l mul %.3073, 4 + %.3075 =l add %.3072, %.3074 + storel %.3075, %.3066 + %.3077 =l add %.3076, 0 + %.3078 =l copy $g_518 + 
%.3079 =l mul 44, 1 + %.3080 =l add %.3078, %.3079 + %.3081 =l copy %.3080 + storel %.3081, %.3077 + %.3083 =l add %.3082, 0 + %.3084 =l copy $g_185 + %.3085 =l mul 16, 1 + %.3086 =l add %.3084, %.3085 + %.3087 =l copy %.3086 + storel %.3087, %.3083 + %.3089 =l add %.3088, 0 + %.3090 =l copy %.89 + %.3091 =l mul 0, 1 + %.3092 =l add %.3090, %.3091 + %.3093 =l copy %.3092 + storel %.3093, %.3089 + %.3094 =l add %.3088, 8 + %.3095 =l extsw 0 + %.3096 =l mul %.3095, 140 + %.3097 =l add %.533, %.3096 + %.3098 =l extsw 4 + %.3099 =l mul %.3098, 20 + %.3100 =l add %.3097, %.3099 + %.3101 =l extsw 3 + %.3102 =l mul %.3101, 4 + %.3103 =l add %.3100, %.3102 + storel %.3103, %.3094 + %.3104 =l add %.3088, 16 + %.3105 =l copy $g_185 + %.3106 =l mul 44, 1 + %.3107 =l add %.3105, %.3106 + %.3108 =l copy %.3107 + storel %.3108, %.3104 + %.3109 =l add %.3088, 24 + %.3110 =l extsw 5 + %.3111 =l mul %.3110, 140 + %.3112 =l add %.533, %.3111 + %.3113 =l extsw 0 + %.3114 =l mul %.3113, 20 + %.3115 =l add %.3112, %.3114 + %.3116 =l extsw 3 + %.3117 =l mul %.3116, 4 + %.3118 =l add %.3115, %.3117 + storel %.3118, %.3109 + %.3119 =l add %.3088, 32 + %.3120 =l extsw 5 + %.3121 =l mul %.3120, 4 + %.3122 =l add %.1198, %.3121 + storel %.3122, %.3119 + %.3123 =l add %.3088, 40 + %.3124 =l copy %.10 + %.3125 =l mul 48, 1 + %.3126 =l add %.3124, %.3125 + %.3127 =l copy %.3126 + storel %.3127, %.3123 + %.3128 =l add %.3088, 48 + storel %.1106, %.3128 + %.3129 =l add %.3088, 56 + %.3130 =l extsw 0 + %.3131 =l copy %.3130 + storel %.3131, %.3129 + %.3132 =l add %.3088, 64 + %.3133 =l copy $g_185 + %.3134 =l mul 48, 1 + %.3135 =l add %.3133, %.3134 + %.3136 =l copy %.3135 + storel %.3136, %.3132 + %.3137 =l add %.3088, 72 + %.3138 =l copy $g_265 + %.3139 =l mul 44, 1 + %.3140 =l add %.3138, %.3139 + %.3141 =l copy %.3140 + storel %.3141, %.3137 + %.3142 =l add %.3088, 80 + %.3143 =l copy $g_185 + %.3144 =l mul 48, 1 + %.3145 =l add %.3143, %.3144 + %.3146 =l copy %.3145 + storel %.3146, %.3142 + 
%.3147 =l add %.3088, 88 + %.3148 =l extsw 5 + %.3149 =l mul %.3148, 140 + %.3150 =l add %.533, %.3149 + %.3151 =l extsw 0 + %.3152 =l mul %.3151, 20 + %.3153 =l add %.3150, %.3152 + %.3154 =l extsw 3 + %.3155 =l mul %.3154, 4 + %.3156 =l add %.3153, %.3155 + storel %.3156, %.3147 + %.3157 =l add %.3088, 96 + %.3158 =l copy $g_518 + %.3159 =l mul 48, 1 + %.3160 =l add %.3158, %.3159 + %.3161 =l copy %.3160 + storel %.3161, %.3157 + %.3162 =l add %.3088, 104 + %.3163 =l extsw 5 + %.3164 =l mul %.3163, 4 + %.3165 =l add %.1198, %.3164 + storel %.3165, %.3162 + %.3166 =l add %.3088, 112 + %.3167 =l extsw 0 + %.3168 =l copy %.3167 + storel %.3168, %.3166 + %.3169 =l add %.3088, 120 + %.3170 =l copy $g_518 + %.3171 =l mul 48, 1 + %.3172 =l add %.3170, %.3171 + %.3173 =l copy %.3172 + storel %.3173, %.3169 + %.3174 =l add %.3088, 128 + %.3175 =l extsw 0 + %.3176 =l mul %.3175, 140 + %.3177 =l add %.533, %.3176 + %.3178 =l extsw 4 + %.3179 =l mul %.3178, 20 + %.3180 =l add %.3177, %.3179 + %.3181 =l extsw 3 + %.3182 =l mul %.3181, 4 + %.3183 =l add %.3180, %.3182 + storel %.3183, %.3174 + %.3184 =l add %.3088, 136 + %.3185 =l extsw 0 + %.3186 =l copy %.3185 + storel %.3186, %.3184 + %.3187 =l add %.3088, 144 + %.3188 =l copy $g_185 + %.3189 =l mul 48, 1 + %.3190 =l add %.3188, %.3189 + %.3191 =l copy %.3190 + storel %.3191, %.3187 + %.3192 =l add %.3088, 152 + %.3193 =l copy %.89 + %.3194 =l mul 0, 1 + %.3195 =l add %.3193, %.3194 + %.3196 =l copy %.3195 + storel %.3196, %.3192 + %.3197 =l add %.3088, 160 + %.3198 =l copy %.10 + %.3199 =l mul 44, 1 + %.3200 =l add %.3198, %.3199 + %.3201 =l copy %.3200 + storel %.3201, %.3197 + %.3202 =l add %.3088, 168 + %.3203 =l copy $g_265 + %.3204 =l mul 44, 1 + %.3205 =l add %.3203, %.3204 + %.3206 =l copy %.3205 + storel %.3206, %.3202 + %.3207 =l add %.3088, 176 + %.3208 =l extsw 0 + %.3209 =l copy %.3208 + storel %.3209, %.3207 + %.3210 =l add %.3088, 184 + %.3211 =l copy $g_518 + %.3212 =l mul 48, 1 + %.3213 =l add %.3211, 
%.3212 + %.3214 =l copy %.3213 + storel %.3214, %.3210 + %.3215 =l add %.3088, 192 + %.3216 =l copy $g_130 + %.3217 =l mul 0, 1 + %.3218 =l add %.3216, %.3217 + %.3219 =l copy %.3218 + storel %.3219, %.3215 + %.3220 =l add %.3088, 200 + storel %.1106, %.3220 + %.3221 =l add %.3088, 208 + storel %.1106, %.3221 + %.3222 =l add %.3088, 216 + %.3223 =l copy %.10 + %.3224 =l mul 16, 1 + %.3225 =l add %.3223, %.3224 + %.3226 =l copy %.3225 + storel %.3226, %.3222 + %.3227 =l add %.3088, 224 + %.3228 =l copy $g_130 + %.3229 =l mul 0, 1 + %.3230 =l add %.3228, %.3229 + %.3231 =l copy %.3230 + storel %.3231, %.3227 + %.3232 =l add %.3088, 232 + %.3233 =l extsw 5 + %.3234 =l mul %.3233, 4 + %.3235 =l add %.1198, %.3234 + storel %.3235, %.3232 + %.3236 =l add %.3088, 240 + %.3237 =l extsw 0 + %.3238 =l copy %.3237 + storel %.3238, %.3236 + %.3239 =l add %.3088, 248 + %.3240 =l copy $g_130 + %.3241 =l mul 0, 1 + %.3242 =l add %.3240, %.3241 + %.3243 =l copy %.3242 + storel %.3243, %.3239 + %.3244 =l add %.3088, 256 + %.3245 =l copy $g_185 + %.3246 =l mul 48, 1 + %.3247 =l add %.3245, %.3246 + %.3248 =l copy %.3247 + storel %.3248, %.3244 + %.3249 =l add %.3088, 264 + %.3250 =l copy %.10 + %.3251 =l mul 48, 1 + %.3252 =l add %.3250, %.3251 + %.3253 =l copy %.3252 + storel %.3253, %.3249 + %.3254 =l add %.3088, 272 + %.3255 =l copy %.10 + %.3256 =l mul 48, 1 + %.3257 =l add %.3255, %.3256 + %.3258 =l copy %.3257 + storel %.3258, %.3254 + %.3259 =l add %.3088, 280 + %.3260 =l copy $g_518 + %.3261 =l mul 44, 1 + %.3262 =l add %.3260, %.3261 + %.3263 =l copy %.3262 + storel %.3263, %.3259 + %.3264 =l add %.3088, 288 + %.3265 =l extsw 0 + %.3266 =l copy %.3265 + storel %.3266, %.3264 + %.3267 =l add %.3088, 296 + %.3268 =l copy $g_518 + %.3269 =l mul 48, 1 + %.3270 =l add %.3268, %.3269 + %.3271 =l copy %.3270 + storel %.3271, %.3267 + %.3272 =l add %.3088, 304 + %.3273 =l extsw 0 + %.3274 =l copy %.3273 + storel %.3274, %.3272 + %.3275 =l add %.3088, 312 + %.3276 =l copy $g_518 + 
%.3277 =l mul 44, 1 + %.3278 =l add %.3276, %.3277 + %.3279 =l copy %.3278 + storel %.3279, %.3275 + %.3280 =l add %.3088, 320 + %.3281 =l extsw 5 + %.3282 =l mul %.3281, 4 + %.3283 =l add %.1198, %.3282 + storel %.3283, %.3280 + %.3284 =l add %.3088, 328 + %.3285 =l copy $g_518 + %.3286 =l mul 16, 1 + %.3287 =l add %.3285, %.3286 + %.3288 =l copy %.3287 + storel %.3288, %.3284 + %.3289 =l add %.3088, 336 + %.3290 =l copy $g_130 + %.3291 =l mul 0, 1 + %.3292 =l add %.3290, %.3291 + %.3293 =l copy %.3292 + storel %.3293, %.3289 + %.3294 =l add %.3088, 344 + %.3295 =l extsw 5 + %.3296 =l mul %.3295, 4 + %.3297 =l add %.1198, %.3296 + storel %.3297, %.3294 + %.3298 =l add %.3088, 352 + %.3299 =l copy $g_185 + %.3300 =l mul 16, 1 + %.3301 =l add %.3299, %.3300 + %.3302 =l copy %.3301 + storel %.3302, %.3298 + %.3303 =l add %.3088, 360 + %.3304 =l copy $g_185 + %.3305 =l mul 48, 1 + %.3306 =l add %.3304, %.3305 + %.3307 =l copy %.3306 + storel %.3307, %.3303 + %.3308 =l add %.3088, 368 + %.3309 =l extsw 0 + %.3310 =l copy %.3309 + storel %.3310, %.3308 + %.3311 =l add %.3088, 376 + %.3312 =l extsw 0 + %.3313 =l copy %.3312 + storel %.3313, %.3311 + %.3314 =l add %.3088, 384 + %.3315 =l copy %.10 + %.3316 =l mul 40, 1 + %.3317 =l add %.3315, %.3316 + %.3318 =l copy %.3317 + storel %.3318, %.3314 + %.3319 =l add %.3088, 392 + %.3320 =l extsw 0 + %.3321 =l copy %.3320 + storel %.3321, %.3319 + %.3322 =l add %.3088, 400 + %.3323 =l copy %.89 + %.3324 =l mul 0, 1 + %.3325 =l add %.3323, %.3324 + %.3326 =l copy %.3325 + storel %.3326, %.3322 + %.3327 =l add %.3088, 408 + %.3328 =l copy $g_518 + %.3329 =l mul 16, 1 + %.3330 =l add %.3328, %.3329 + %.3331 =l copy %.3330 + storel %.3331, %.3327 + %.3332 =l add %.3088, 416 + %.3333 =l extsw 5 + %.3334 =l mul %.3333, 140 + %.3335 =l add %.533, %.3334 + %.3336 =l extsw 0 + %.3337 =l mul %.3336, 20 + %.3338 =l add %.3335, %.3337 + %.3339 =l extsw 3 + %.3340 =l mul %.3339, 4 + %.3341 =l add %.3338, %.3340 + storel %.3341, %.3332 + 
%.3342 =l add %.3088, 424 + %.3343 =l copy $g_185 + %.3344 =l mul 16, 1 + %.3345 =l add %.3343, %.3344 + %.3346 =l copy %.3345 + storel %.3346, %.3342 + %.3347 =l add %.3088, 432 + %.3348 =l extsw 0 + %.3349 =l copy %.3348 + storel %.3349, %.3347 + %.3350 =l add %.3088, 440 + %.3351 =l extsw 5 + %.3352 =l mul %.3351, 4 + %.3353 =l add %.1198, %.3352 + storel %.3353, %.3350 + %.3354 =l add %.3088, 448 + %.3355 =l copy $g_130 + %.3356 =l mul 0, 1 + %.3357 =l add %.3355, %.3356 + %.3358 =l copy %.3357 + storel %.3358, %.3354 + %.3359 =l add %.3088, 456 + %.3360 =l copy $g_185 + %.3361 =l mul 48, 1 + %.3362 =l add %.3360, %.3361 + %.3363 =l copy %.3362 + storel %.3363, %.3359 + %.3364 =l add %.3088, 464 + %.3365 =l extsw 0 + %.3366 =l copy %.3365 + storel %.3366, %.3364 + %.3367 =l add %.3088, 472 + %.3368 =l copy %.10 + %.3369 =l mul 48, 1 + %.3370 =l add %.3368, %.3369 + %.3371 =l copy %.3370 + storel %.3371, %.3367 + %.3372 =l add %.3088, 480 + %.3373 =l copy $g_265 + %.3374 =l mul 44, 1 + %.3375 =l add %.3373, %.3374 + %.3376 =l copy %.3375 + storel %.3376, %.3372 + %.3377 =l add %.3088, 488 + %.3378 =l copy $g_130 + %.3379 =l mul 0, 1 + %.3380 =l add %.3378, %.3379 + %.3381 =l copy %.3380 + storel %.3381, %.3377 + %.3382 =l add %.3088, 496 + %.3383 =l copy $g_518 + %.3384 =l mul 44, 1 + %.3385 =l add %.3383, %.3384 + %.3386 =l copy %.3385 + storel %.3386, %.3382 + %.3387 =l add %.3088, 504 + %.3388 =l copy $g_518 + %.3389 =l mul 44, 1 + %.3390 =l add %.3388, %.3389 + %.3391 =l copy %.3390 + storel %.3391, %.3387 + %.3392 =l add %.3088, 512 + %.3393 =l copy $g_265 + %.3394 =l mul 44, 1 + %.3395 =l add %.3393, %.3394 + %.3396 =l copy %.3395 + storel %.3396, %.3392 + %.3397 =l add %.3088, 520 + %.3398 =l copy $g_185 + %.3399 =l mul 48, 1 + %.3400 =l add %.3398, %.3399 + %.3401 =l copy %.3400 + storel %.3401, %.3397 + %.3402 =l add %.3088, 528 + %.3403 =l extsw 5 + %.3404 =l mul %.3403, 4 + %.3405 =l add %.1198, %.3404 + storel %.3405, %.3402 + %.3406 =l add %.3088, 
536 + %.3407 =l extsw 5 + %.3408 =l mul %.3407, 4 + %.3409 =l add %.1198, %.3408 + storel %.3409, %.3406 + %.3410 =l add %.3088, 544 + storel %.1106, %.3410 + %.3411 =l add %.3088, 552 + %.3412 =l copy %.10 + %.3413 =l mul 48, 1 + %.3414 =l add %.3412, %.3413 + %.3415 =l copy %.3414 + storel %.3415, %.3411 + %.3416 =l add %.3088, 560 + %.3417 =l copy $g_130 + %.3418 =l mul 0, 1 + %.3419 =l add %.3417, %.3418 + %.3420 =l copy %.3419 + storel %.3420, %.3416 + %.3421 =l add %.3088, 568 + %.3422 =l copy $g_518 + %.3423 =l mul 48, 1 + %.3424 =l add %.3422, %.3423 + %.3425 =l copy %.3424 + storel %.3425, %.3421 + %.3426 =l add %.3088, 576 + %.3427 =l extsw 5 + %.3428 =l mul %.3427, 4 + %.3429 =l add %.1198, %.3428 + storel %.3429, %.3426 + %.3430 =l add %.3088, 584 + %.3431 =l extsw 0 + %.3432 =l copy %.3431 + storel %.3432, %.3430 + %.3433 =l add %.3088, 592 + %.3434 =l extsw 5 + %.3435 =l mul %.3434, 4 + %.3436 =l add %.1198, %.3435 + storel %.3436, %.3433 + %.3437 =l add %.3088, 600 + storel %.1106, %.3437 + %.3438 =l add %.3088, 608 + %.3439 =l copy $g_185 + %.3440 =l mul 48, 1 + %.3441 =l add %.3439, %.3440 + %.3442 =l copy %.3441 + storel %.3442, %.3438 + %.3443 =l add %.3088, 616 + %.3444 =l copy $g_265 + %.3445 =l mul 44, 1 + %.3446 =l add %.3444, %.3445 + %.3447 =l copy %.3446 + storel %.3447, %.3443 + %.3448 =l add %.3088, 624 + %.3449 =l copy $g_265 + %.3450 =l mul 44, 1 + %.3451 =l add %.3449, %.3450 + %.3452 =l copy %.3451 + storel %.3452, %.3448 + %.3453 =l add %.3088, 632 + %.3454 =l copy $g_185 + %.3455 =l mul 48, 1 + %.3456 =l add %.3454, %.3455 + %.3457 =l copy %.3456 + storel %.3457, %.3453 + %.3458 =l add %.3088, 640 + %.3459 =l copy $g_185 + %.3460 =l mul 48, 1 + %.3461 =l add %.3459, %.3460 + %.3462 =l copy %.3461 + storel %.3462, %.3458 + %.3463 =l add %.3088, 648 + %.3464 =l copy $g_265 + %.3465 =l mul 44, 1 + %.3466 =l add %.3464, %.3465 + %.3467 =l copy %.3466 + storel %.3467, %.3463 + %.3468 =l add %.3088, 656 + %.3469 =l copy $g_265 + %.3470 
=l mul 44, 1 + %.3471 =l add %.3469, %.3470 + %.3472 =l copy %.3471 + storel %.3472, %.3468 + %.3473 =l add %.3088, 664 + %.3474 =l copy %.89 + %.3475 =l mul 0, 1 + %.3476 =l add %.3474, %.3475 + %.3477 =l copy %.3476 + storel %.3477, %.3473 + %.3478 =l add %.3088, 672 + %.3479 =l extsw 0 + %.3480 =l copy %.3479 + storel %.3480, %.3478 + %.3481 =l add %.3088, 680 + storel %.1106, %.3481 + %.3482 =l add %.3088, 688 + %.3483 =l copy %.10 + %.3484 =l mul 48, 1 + %.3485 =l add %.3483, %.3484 + %.3486 =l copy %.3485 + storel %.3486, %.3482 + %.3487 =l add %.3088, 696 + %.3488 =l copy %.10 + %.3489 =l mul 44, 1 + %.3490 =l add %.3488, %.3489 + %.3491 =l copy %.3490 + storel %.3491, %.3487 + %.3492 =l add %.3088, 704 + %.3493 =l copy $g_185 + %.3494 =l mul 16, 1 + %.3495 =l add %.3493, %.3494 + %.3496 =l copy %.3495 + storel %.3496, %.3492 + %.3497 =l add %.3088, 712 + %.3498 =l copy $g_518 + %.3499 =l mul 16, 1 + %.3500 =l add %.3498, %.3499 + %.3501 =l copy %.3500 + storel %.3501, %.3497 + %.3502 =l add %.3088, 720 + %.3503 =l copy $g_265 + %.3504 =l mul 44, 1 + %.3505 =l add %.3503, %.3504 + %.3506 =l copy %.3505 + storel %.3506, %.3502 + %.3507 =l add %.3088, 728 + %.3508 =l extsw 0 + %.3509 =l copy %.3508 + storel %.3509, %.3507 + %.3510 =l add %.3088, 736 + %.3511 =l extsw 0 + %.3512 =l copy %.3511 + storel %.3512, %.3510 + %.3513 =l add %.3088, 744 + %.3514 =l copy $g_265 + %.3515 =l mul 44, 1 + %.3516 =l add %.3514, %.3515 + %.3517 =l copy %.3516 + storel %.3517, %.3513 + %.3518 =l add %.3088, 752 + %.3519 =l copy $g_794 + %.3520 =l mul 0, 1 + %.3521 =l add %.3519, %.3520 + %.3522 =l copy %.3521 + storel %.3522, %.3518 + %.3523 =l add %.3088, 760 + %.3524 =l extsw 0 + %.3525 =l copy %.3524 + storel %.3525, %.3523 + %.3526 =l add %.3088, 768 + %.3527 =l extsw 3 + %.3528 =l mul %.3527, 4 + %.3529 =l add %.1198, %.3528 + storel %.3529, %.3526 + %.3530 =l add %.3088, 776 + storel %.1106, %.3530 + %.3531 =l add %.3088, 784 + %.3532 =l copy %.89 + %.3533 =l mul 0, 1 + 
%.3534 =l add %.3532, %.3533 + %.3535 =l copy %.3534 + storel %.3535, %.3531 + %.3536 =l add %.3088, 792 + storel %.1106, %.3536 + %.3537 =l add %.3088, 800 + %.3538 =l copy %.89 + %.3539 =l mul 0, 1 + %.3540 =l add %.3538, %.3539 + %.3541 =l copy %.3540 + storel %.3541, %.3537 + %.3542 =l add %.3088, 808 + %.3543 =l extsw 0 + %.3544 =l copy %.3543 + storel %.3544, %.3542 + %.3545 =l add %.3088, 816 + %.3546 =l copy $g_518 + %.3547 =l mul 44, 1 + %.3548 =l add %.3546, %.3547 + %.3549 =l copy %.3548 + storel %.3549, %.3545 + %.3550 =l add %.3088, 824 + %.3551 =l copy $g_265 + %.3552 =l mul 44, 1 + %.3553 =l add %.3551, %.3552 + %.3554 =l copy %.3553 + storel %.3554, %.3550 + %.3555 =l add %.3088, 832 + %.3556 =l copy $g_185 + %.3557 =l mul 48, 1 + %.3558 =l add %.3556, %.3557 + %.3559 =l copy %.3558 + storel %.3559, %.3555 + %.3560 =l add %.3088, 840 + %.3561 =l copy %.10 + %.3562 =l mul 48, 1 + %.3563 =l add %.3561, %.3562 + %.3564 =l copy %.3563 + storel %.3564, %.3560 + %.3565 =l add %.3088, 848 + %.3566 =l copy $g_265 + %.3567 =l mul 44, 1 + %.3568 =l add %.3566, %.3567 + %.3569 =l copy %.3568 + storel %.3569, %.3565 + %.3570 =l add %.3088, 856 + %.3571 =l copy $g_130 + %.3572 =l mul 0, 1 + %.3573 =l add %.3571, %.3572 + %.3574 =l copy %.3573 + storel %.3574, %.3570 + %.3575 =l add %.3088, 864 + %.3576 =l copy $g_518 + %.3577 =l mul 44, 1 + %.3578 =l add %.3576, %.3577 + %.3579 =l copy %.3578 + storel %.3579, %.3575 + %.3580 =l add %.3088, 872 + %.3581 =l copy $g_185 + %.3582 =l mul 48, 1 + %.3583 =l add %.3581, %.3582 + %.3584 =l copy %.3583 + storel %.3584, %.3580 + %.3585 =l add %.3088, 880 + %.3586 =l extsw 0 + %.3587 =l copy %.3586 + storel %.3587, %.3585 + %.3588 =l add %.3088, 888 + %.3589 =l extsw 0 + %.3590 =l copy %.3589 + storel %.3590, %.3588 + %.3591 =l add %.3088, 896 + storel %.1106, %.3591 + %.3592 =l add %.3088, 904 + %.3593 =l copy $g_518 + %.3594 =l mul 48, 1 + %.3595 =l add %.3593, %.3594 + %.3596 =l copy %.3595 + storel %.3596, %.3592 + 
%.3597 =l add %.3088, 912 + %.3598 =l extsw 3 + %.3599 =l mul %.3598, 4 + %.3600 =l add %.1198, %.3599 + storel %.3600, %.3597 + %.3601 =l add %.3088, 920 + %.3602 =l extsw 0 + %.3603 =l copy %.3602 + storel %.3603, %.3601 + %.3604 =l add %.3088, 928 + storel %.1106, %.3604 + %.3605 =l add %.3088, 936 + %.3606 =l extsw 0 + %.3607 =l copy %.3606 + storel %.3607, %.3605 + %.3608 =l add %.3088, 944 + %.3609 =l extsw 0 + %.3610 =l mul %.3609, 140 + %.3611 =l add %.533, %.3610 + %.3612 =l extsw 4 + %.3613 =l mul %.3612, 20 + %.3614 =l add %.3611, %.3613 + %.3615 =l extsw 3 + %.3616 =l mul %.3615, 4 + %.3617 =l add %.3614, %.3616 + storel %.3617, %.3608 + %.3618 =l add %.3088, 952 + %.3619 =l copy $g_185 + %.3620 =l mul 48, 1 + %.3621 =l add %.3619, %.3620 + %.3622 =l copy %.3621 + storel %.3622, %.3618 + %.3623 =l add %.3088, 960 + %.3624 =l copy $g_265 + %.3625 =l mul 44, 1 + %.3626 =l add %.3624, %.3625 + %.3627 =l copy %.3626 + storel %.3627, %.3623 + %.3628 =l add %.3088, 968 + %.3629 =l copy $g_185 + %.3630 =l mul 48, 1 + %.3631 =l add %.3629, %.3630 + %.3632 =l copy %.3631 + storel %.3632, %.3628 + %.3633 =l add %.3088, 976 + %.3634 =l copy $g_185 + %.3635 =l mul 40, 1 + %.3636 =l add %.3634, %.3635 + %.3637 =l copy %.3636 + storel %.3637, %.3633 + %.3638 =l add %.3088, 984 + %.3639 =l copy $g_185 + %.3640 =l mul 48, 1 + %.3641 =l add %.3639, %.3640 + %.3642 =l copy %.3641 + storel %.3642, %.3638 + %.3643 =l add %.3088, 992 + %.3644 =l copy $g_265 + %.3645 =l mul 44, 1 + %.3646 =l add %.3644, %.3645 + %.3647 =l copy %.3646 + storel %.3647, %.3643 + %.3648 =l add %.3088, 1000 + %.3649 =l copy $g_185 + %.3650 =l mul 48, 1 + %.3651 =l add %.3649, %.3650 + %.3652 =l copy %.3651 + storel %.3652, %.3648 + %.3654 =l add %.3653, 0 + storel 6920699678995543627, %.3654 + %.3656 =l add %.3655, 0 + %.3657 =w copy 9 + storew %.3657, %.3656 + %.3659 =l add %.3658, 0 + %.3660 =w copy 193 + storeb %.3660, %.3659 + %.3664 =l loadl %.1115 + %.3665 =l loaduw $g_794 + storew %.3665, 
%.3664 + %.3666 =l add $g_794, 4 + %.3667 =l add %.3664, 4 + %.3668 =l loaduw %.3666 + storew %.3668, %.3667 + %.3669 =l add %.3666, 4 + %.3670 =l add %.3667, 4 + %.3671 =l loaduw %.3669 + storew %.3671, %.3670 + %.3672 =l add %.3669, 4 + %.3673 =l add %.3670, 4 + %.3674 =l loaduw %.3672 + storew %.3674, %.3673 + %.3675 =l add %.3672, 4 + %.3676 =l add %.3673, 4 + %.3677 =l loaduw %.3675 + storew %.3677, %.3676 + %.3678 =l add %.3675, 4 + %.3679 =l add %.3676, 4 + %.3680 =w loaduh %.110 + %.3681 =w add %.3680, 1 + storeh %.3681, %.110 + %.3682 =l copy $g_130 + %.3683 =l mul 12, 1 + %.3684 =l add %.3682, %.3683 + %.3685 =l copy %.3684 + storew 1, %.3685 +@for_cond.1192 + %.3686 =l copy $g_130 + %.3687 =l mul 12, 1 + %.3688 =l add %.3686, %.3687 + %.3689 =l copy %.3688 + %.3690 =w loadsw %.3689 + %.3691 =w cslew %.3690, 5 + jnz %.3691, @for_body.1193, @for_join.1195 +@for_body.1193 + %.3693 =l add %.3692, 0 + %.3694 =w copy 254 + storeb %.3694, %.3693 + %.3696 =l add %.3695, 0 + storel %.3043, %.3696 + %.3698 =l add %.3697, 0 + %.3699 =l extsw 0 + %.3700 =l sub %.3699, 1 + %.3701 =w copy %.3700 + storew %.3701, %.3698 + %.3703 =l add %.3702, 0 + %.3704 =w copy 150919925 + storew %.3704, %.3703 + storew 0, %.3706 +@for_cond.1196 + %.3707 =w loadsw %.3706 + %.3708 =w csltw %.3707, 9 + jnz %.3708, @for_body.1197, @for_join.1199 +@for_body.1197 + %.3709 =w copy 0 + %.3710 =w loadsw %.3706 + %.3711 =l extsw %.3710 + %.3712 =l mul %.3711, 4 + %.3713 =l add %.3705, %.3712 + storew %.3709, %.3713 +@for_cont.1198 + %.3714 =w loadsw %.3706 + %.3715 =w add %.3714, 1 + storew %.3715, %.3706 + jmp @for_cond.1196 +@for_join.1199 + %.3716 =w loadub %.3692 + %.3717 =w add %.3716, 1 + storeb %.3717, %.3692 + %.3718 =l loadl %.3695 + storel %.2, %.3718 + %.3719 =w loadub %.3658 + %.3720 =w add %.3719, 1 + storeb %.3720, %.3658 + %.3721 =l copy $g_130 + %.3722 =l mul 12, 1 + %.3723 =l add %.3721, %.3722 + %.3724 =l copy %.3723 + %.3725 =w loadsw %.3724 + %.3726 =l extsw %.3725 + %.3727 
=l mul %.3726, 1 + %.3728 =l add $g_132, %.3727 + %.3729 =w loadsb %.3728 + %.3730 =w extsb %.3729 + %.3731 =w cnew %.3730, 0 + jnz %.3731, @if_true.1200, @if_false.1201 +@if_true.1200 + jmp @for_join.1195 +@if_false.1201 +@for_cont.1194 + %.3732 =l copy $g_130 + %.3733 =l mul 12, 1 + %.3734 =l add %.3732, %.3733 + %.3735 =l copy %.3734 + %.3736 =w loadsw %.3735 + %.3737 =w add %.3736, 1 + storew %.3737, %.3735 + jmp @for_cond.1192 +@for_join.1195 +@for_cont.1190 + %.3738 =l copy $g_185 + %.3739 =l mul 24, 1 + %.3740 =l add %.3738, %.3739 + %.3741 =l copy %.3740 + %.3742 =l loadl %.3741 + %.3743 =l copy %.3742 + %.3744 =l extsw 6 + %.3745 =l call $safe_add_func_int64_t_s_s(l %.3743, l %.3744) + %.3746 =l copy %.3745 + %.3747 =l copy $g_185 + %.3748 =l mul 24, 1 + %.3749 =l add %.3747, %.3748 + %.3750 =l copy %.3749 + storel %.3746, %.3750 + jmp @for_cond.1188 +@for_join.1191 +@for_cont.1087 + %.3751 =l copy %.89 + %.3752 =l mul 8, 1 + %.3753 =l add %.3751, %.3752 + %.3754 =l copy %.3753 + %.3755 =w loadsh %.3754 + %.3756 =w sub %.3755, 1 + storeh %.3756, %.3754 + jmp @for_cond.1085 +@for_join.1088 + %.3757 =w sub 0, 4 + %.3758 =w copy %.3757 + %.3759 =l copy $g_518 + %.3760 =l mul 0, 1 + %.3761 =l add %.3759, %.3760 + %.3762 =l copy %.3761 + storeb %.3758, %.3762 +@for_cond.1202 + %.3763 =l copy $g_518 + %.3764 =l mul 0, 1 + %.3765 =l add %.3763, %.3764 + %.3766 =l copy %.3765 + %.3767 =w loadub %.3766 + %.3768 =w extub %.3767 + %.3769 =w csgew %.3768, 55 + jnz %.3769, @for_body.1203, @for_join.1205 +@for_body.1203 + %.3771 =l add %.3770, 0 + %.3772 =w copy 6002 + storeh %.3772, %.3771 + %.3773 =l extsw 3 + %.3774 =l mul %.3773, 140 + %.3775 =l add %.533, %.3774 + %.3776 =l extsw 0 + %.3777 =l mul %.3776, 20 + %.3778 =l add %.3775, %.3777 + %.3779 =l extsw 3 + %.3780 =l mul %.3779, 4 + %.3781 =l add %.3778, %.3780 + %.3782 =w loadsw %.3781 + %.3783 =w loaduh %.3770 + %.3784 =w call $safe_lshift_func_uint16_t_u_s(w %.3783, w 12) + %.3785 =w extuh %.3784 + %.3786 =l 
loadl $g_173 + %.3787 =w loadsw %.3786 + %.3788 =w cslew %.3785, %.3787 + %.3789 =l extsw 0 + %.3790 =w cnel %.4, %.3789 + %.3791 =w copy 2634066933 + %.3792 =w call $safe_add_func_int32_t_s_s(w %.3790, w %.3791) + %.3793 =w cslew %.3788, %.3792 + %.3794 =w xor %.3782, %.3793 + storew %.3794, %.3781 +@for_cont.1204 + %.3795 =l copy $g_518 + %.3796 =l mul 0, 1 + %.3797 =l add %.3795, %.3796 + %.3798 =l copy %.3797 + %.3799 =w loadub %.3798 + %.3800 =w extub %.3799 + %.3801 =w copy 6 + %.3802 =w call $safe_add_func_int16_t_s_s(w %.3800, w %.3801) + %.3803 =w copy %.3802 + %.3804 =l copy $g_518 + %.3805 =l mul 0, 1 + %.3806 =l add %.3804, %.3805 + %.3807 =l copy %.3806 + storeb %.3803, %.3807 + jmp @for_cond.1202 +@for_join.1205 +@if_join.1080 +@lbl_1172.1206 + %.3808 =w copy 0 + %.3809 =l copy $g_518 + %.3810 =l mul 0, 1 + %.3811 =l add %.3809, %.3810 + %.3812 =l copy %.3811 + storeb %.3808, %.3812 +@for_cond.1207 + %.3813 =l copy $g_518 + %.3814 =l mul 0, 1 + %.3815 =l add %.3813, %.3814 + %.3816 =l copy %.3815 + %.3817 =w loadub %.3816 + %.3818 =w extub %.3817 + %.3819 =w cnew %.3818, 57 + jnz %.3819, @for_body.1208, @for_join.1210 +@for_body.1208 + %.3821 =l add %.3820, 0 + %.3822 =w copy 1 + storew %.3822, %.3821 + %.3824 =l add %.3823, 0 + %.3825 =w copy 809845413 + storew %.3825, %.3824 + %.3827 =l add %.3826, 0 + %.3828 =w copy 140435225 + storew %.3828, %.3827 + %.3830 =l add %.3829, 0 + %.3831 =w copy 1062787020 + storew %.3831, %.3830 + %.3833 =l add %.3832, 0 + %.3834 =w copy 0 + storew %.3834, %.3833 + %.3836 =l add %.3835, 0 + %.3837 =l copy $g_185 + %.3838 =l mul 40, 1 + %.3839 =l add %.3837, %.3838 + %.3840 =l copy %.3839 + storel %.3840, %.3836 + %.3842 =l add %.3841, 0 + %.3843 =l copy $g_185 + %.3844 =l mul 44, 1 + %.3845 =l add %.3843, %.3844 + %.3846 =l copy %.3845 + storel %.3846, %.3842 + %.3848 =l add %.3847, 0 + storel %.3823, %.3848 + %.3850 =l add %.3849, 0 + %.3851 =l copy %.10 + %.3852 =l mul 44, 1 + %.3853 =l add %.3851, %.3852 + %.3854 
=l copy %.3853 + storel %.3854, %.3850 + %.3856 =l add %.3855, 0 + %.3857 =l copy %.10 + %.3858 =l mul 16, 1 + %.3859 =l add %.3857, %.3858 + %.3860 =l copy %.3859 + storel %.3860, %.3856 + %.3861 =l add %.3855, 8 + %.3862 =l extsw 0 + %.3863 =l copy %.3862 + storel %.3863, %.3861 + %.3864 =l add %.3855, 16 + %.3865 =l copy %.10 + %.3866 =l mul 16, 1 + %.3867 =l add %.3865, %.3866 + %.3868 =l copy %.3867 + storel %.3868, %.3864 + %.3869 =l add %.3855, 24 + %.3870 =l copy %.10 + %.3871 =l mul 16, 1 + %.3872 =l add %.3870, %.3871 + %.3873 =l copy %.3872 + storel %.3873, %.3869 + %.3874 =l add %.3855, 32 + %.3875 =l extsw 0 + %.3876 =l copy %.3875 + storel %.3876, %.3874 + %.3877 =l add %.3855, 40 + %.3878 =l copy %.10 + %.3879 =l mul 16, 1 + %.3880 =l add %.3878, %.3879 + %.3881 =l copy %.3880 + storel %.3881, %.3877 + %.3882 =l add %.3855, 48 + %.3883 =l copy %.10 + %.3884 =l mul 16, 1 + %.3885 =l add %.3883, %.3884 + %.3886 =l copy %.3885 + storel %.3886, %.3882 + %.3887 =l add %.3855, 56 + %.3888 =l extsw 0 + %.3889 =l copy %.3888 + storel %.3889, %.3887 + %.3890 =l add %.3855, 64 + %.3891 =l copy %.10 + %.3892 =l mul 16, 1 + %.3893 =l add %.3891, %.3892 + %.3894 =l copy %.3893 + storel %.3894, %.3890 + %.3895 =l add %.3855, 72 + %.3896 =l copy %.10 + %.3897 =l mul 16, 1 + %.3898 =l add %.3896, %.3897 + %.3899 =l copy %.3898 + storel %.3899, %.3895 + %.3900 =l add %.3855, 80 + %.3901 =l extsw 0 + %.3902 =l copy %.3901 + storel %.3902, %.3900 + %.3903 =l add %.3855, 88 + %.3904 =l copy %.10 + %.3905 =l mul 16, 1 + %.3906 =l add %.3904, %.3905 + %.3907 =l copy %.3906 + storel %.3907, %.3903 + %.3908 =l add %.3855, 96 + %.3909 =l copy %.10 + %.3910 =l mul 16, 1 + %.3911 =l add %.3909, %.3910 + %.3912 =l copy %.3911 + storel %.3912, %.3908 + %.3913 =l add %.3855, 104 + %.3914 =l extsw 0 + %.3915 =l copy %.3914 + storel %.3915, %.3913 + %.3916 =l add %.3855, 112 + %.3917 =l copy %.10 + %.3918 =l mul 16, 1 + %.3919 =l add %.3917, %.3918 + %.3920 =l copy %.3919 + storel 
%.3920, %.3916 + %.3921 =l add %.3855, 120 + %.3922 =l copy %.10 + %.3923 =l mul 16, 1 + %.3924 =l add %.3922, %.3923 + %.3925 =l copy %.3924 + storel %.3925, %.3921 + %.3926 =l add %.3855, 128 + %.3927 =l extsw 0 + %.3928 =l copy %.3927 + storel %.3928, %.3926 + %.3929 =l add %.3855, 136 + %.3930 =l copy %.10 + %.3931 =l mul 16, 1 + %.3932 =l add %.3930, %.3931 + %.3933 =l copy %.3932 + storel %.3933, %.3929 + %.3934 =l add %.3855, 144 + %.3935 =l copy $g_265 + %.3936 =l mul 48, 1 + %.3937 =l add %.3935, %.3936 + %.3938 =l copy %.3937 + storel %.3938, %.3934 + %.3939 =l add %.3855, 152 + %.3940 =l copy %.10 + %.3941 =l mul 16, 1 + %.3942 =l add %.3940, %.3941 + %.3943 =l copy %.3942 + storel %.3943, %.3939 + %.3944 =l add %.3855, 160 + %.3945 =l copy $g_265 + %.3946 =l mul 48, 1 + %.3947 =l add %.3945, %.3946 + %.3948 =l copy %.3947 + storel %.3948, %.3944 + %.3949 =l add %.3855, 168 + %.3950 =l copy $g_265 + %.3951 =l mul 48, 1 + %.3952 =l add %.3950, %.3951 + %.3953 =l copy %.3952 + storel %.3953, %.3949 + %.3954 =l add %.3855, 176 + %.3955 =l copy %.10 + %.3956 =l mul 16, 1 + %.3957 =l add %.3955, %.3956 + %.3958 =l copy %.3957 + storel %.3958, %.3954 + %.3959 =l add %.3855, 184 + %.3960 =l copy $g_265 + %.3961 =l mul 48, 1 + %.3962 =l add %.3960, %.3961 + %.3963 =l copy %.3962 + storel %.3963, %.3959 + %.3964 =l add %.3855, 192 + %.3965 =l copy $g_265 + %.3966 =l mul 48, 1 + %.3967 =l add %.3965, %.3966 + %.3968 =l copy %.3967 + storel %.3968, %.3964 + %.3969 =l add %.3855, 200 + %.3970 =l copy %.10 + %.3971 =l mul 16, 1 + %.3972 =l add %.3970, %.3971 + %.3973 =l copy %.3972 + storel %.3973, %.3969 + %.3974 =l add %.3855, 208 + %.3975 =l copy $g_265 + %.3976 =l mul 48, 1 + %.3977 =l add %.3975, %.3976 + %.3978 =l copy %.3977 + storel %.3978, %.3974 + %.3979 =l add %.3855, 216 + %.3980 =l copy $g_265 + %.3981 =l mul 48, 1 + %.3982 =l add %.3980, %.3981 + %.3983 =l copy %.3982 + storel %.3983, %.3979 + %.3984 =l add %.3855, 224 + %.3985 =l copy %.10 + %.3986 =l 
mul 16, 1 + %.3987 =l add %.3985, %.3986 + %.3988 =l copy %.3987 + storel %.3988, %.3984 + %.3989 =l add %.3855, 232 + %.3990 =l copy $g_265 + %.3991 =l mul 48, 1 + %.3992 =l add %.3990, %.3991 + %.3993 =l copy %.3992 + storel %.3993, %.3989 + %.3994 =l add %.3855, 240 + %.3995 =l copy $g_265 + %.3996 =l mul 48, 1 + %.3997 =l add %.3995, %.3996 + %.3998 =l copy %.3997 + storel %.3998, %.3994 + %.3999 =l add %.3855, 248 + %.4000 =l copy %.10 + %.4001 =l mul 16, 1 + %.4002 =l add %.4000, %.4001 + %.4003 =l copy %.4002 + storel %.4003, %.3999 + %.4004 =l add %.3855, 256 + %.4005 =l copy $g_265 + %.4006 =l mul 48, 1 + %.4007 =l add %.4005, %.4006 + %.4008 =l copy %.4007 + storel %.4008, %.4004 + %.4009 =l add %.3855, 264 + %.4010 =l copy $g_265 + %.4011 =l mul 48, 1 + %.4012 =l add %.4010, %.4011 + %.4013 =l copy %.4012 + storel %.4013, %.4009 + %.4014 =l add %.3855, 272 + %.4015 =l copy %.10 + %.4016 =l mul 16, 1 + %.4017 =l add %.4015, %.4016 + %.4018 =l copy %.4017 + storel %.4018, %.4014 + %.4019 =l add %.3855, 280 + %.4020 =l copy $g_265 + %.4021 =l mul 48, 1 + %.4022 =l add %.4020, %.4021 + %.4023 =l copy %.4022 + storel %.4023, %.4019 + %.4024 =l add %.3855, 288 + %.4025 =l copy $g_265 + %.4026 =l mul 48, 1 + %.4027 =l add %.4025, %.4026 + %.4028 =l copy %.4027 + storel %.4028, %.4024 + %.4029 =l add %.3855, 296 + %.4030 =l copy %.10 + %.4031 =l mul 16, 1 + %.4032 =l add %.4030, %.4031 + %.4033 =l copy %.4032 + storel %.4033, %.4029 + %.4034 =l add %.3855, 304 + %.4035 =l copy $g_265 + %.4036 =l mul 48, 1 + %.4037 =l add %.4035, %.4036 + %.4038 =l copy %.4037 + storel %.4038, %.4034 + %.4039 =l add %.3855, 312 + %.4040 =l copy $g_265 + %.4041 =l mul 48, 1 + %.4042 =l add %.4040, %.4041 + %.4043 =l copy %.4042 + storel %.4043, %.4039 + %.4044 =l add %.3855, 320 + %.4045 =l copy %.10 + %.4046 =l mul 16, 1 + %.4047 =l add %.4045, %.4046 + %.4048 =l copy %.4047 + storel %.4048, %.4044 + %.4049 =l add %.3855, 328 + %.4050 =l copy $g_265 + %.4051 =l mul 48, 1 + %.4052 
=l add %.4050, %.4051 + %.4053 =l copy %.4052 + storel %.4053, %.4049 + %.4054 =l add %.3855, 336 + %.4055 =l copy $g_265 + %.4056 =l mul 48, 1 + %.4057 =l add %.4055, %.4056 + %.4058 =l copy %.4057 + storel %.4058, %.4054 + %.4059 =l add %.3855, 344 + %.4060 =l copy %.10 + %.4061 =l mul 16, 1 + %.4062 =l add %.4060, %.4061 + %.4063 =l copy %.4062 + storel %.4063, %.4059 + %.4064 =l add %.3855, 352 + %.4065 =l copy $g_265 + %.4066 =l mul 48, 1 + %.4067 =l add %.4065, %.4066 + %.4068 =l copy %.4067 + storel %.4068, %.4064 + %.4069 =l add %.3855, 360 + %.4070 =l copy $g_265 + %.4071 =l mul 48, 1 + %.4072 =l add %.4070, %.4071 + %.4073 =l copy %.4072 + storel %.4073, %.4069 + %.4074 =l add %.3855, 368 + %.4075 =l copy %.10 + %.4076 =l mul 16, 1 + %.4077 =l add %.4075, %.4076 + %.4078 =l copy %.4077 + storel %.4078, %.4074 + %.4079 =l add %.3855, 376 + %.4080 =l copy $g_265 + %.4081 =l mul 48, 1 + %.4082 =l add %.4080, %.4081 + %.4083 =l copy %.4082 + storel %.4083, %.4079 + %.4084 =l add %.3855, 384 + %.4085 =l copy $g_265 + %.4086 =l mul 48, 1 + %.4087 =l add %.4085, %.4086 + %.4088 =l copy %.4087 + storel %.4088, %.4084 + %.4089 =l add %.3855, 392 + %.4090 =l copy %.10 + %.4091 =l mul 16, 1 + %.4092 =l add %.4090, %.4091 + %.4093 =l copy %.4092 + storel %.4093, %.4089 + %.4094 =l add %.3855, 400 + %.4095 =l copy $g_265 + %.4096 =l mul 48, 1 + %.4097 =l add %.4095, %.4096 + %.4098 =l copy %.4097 + storel %.4098, %.4094 + %.4099 =l add %.3855, 408 + %.4100 =l copy $g_265 + %.4101 =l mul 48, 1 + %.4102 =l add %.4100, %.4101 + %.4103 =l copy %.4102 + storel %.4103, %.4099 + %.4104 =l add %.3855, 416 + %.4105 =l copy %.10 + %.4106 =l mul 16, 1 + %.4107 =l add %.4105, %.4106 + %.4108 =l copy %.4107 + storel %.4108, %.4104 + %.4109 =l add %.3855, 424 + %.4110 =l copy $g_265 + %.4111 =l mul 48, 1 + %.4112 =l add %.4110, %.4111 + %.4113 =l copy %.4112 + storel %.4113, %.4109 + %.4114 =l add %.3855, 432 + %.4115 =l copy $g_265 + %.4116 =l mul 48, 1 + %.4117 =l add %.4115, 
%.4116 + %.4118 =l copy %.4117 + storel %.4118, %.4114 + %.4119 =l add %.3855, 440 + %.4120 =l copy %.10 + %.4121 =l mul 16, 1 + %.4122 =l add %.4120, %.4121 + %.4123 =l copy %.4122 + storel %.4123, %.4119 + %.4124 =l add %.3855, 448 + %.4125 =l copy $g_265 + %.4126 =l mul 48, 1 + %.4127 =l add %.4125, %.4126 + %.4128 =l copy %.4127 + storel %.4128, %.4124 + %.4129 =l add %.3855, 456 + %.4130 =l copy $g_265 + %.4131 =l mul 48, 1 + %.4132 =l add %.4130, %.4131 + %.4133 =l copy %.4132 + storel %.4133, %.4129 + %.4134 =l add %.3855, 464 + %.4135 =l copy %.10 + %.4136 =l mul 16, 1 + %.4137 =l add %.4135, %.4136 + %.4138 =l copy %.4137 + storel %.4138, %.4134 + %.4139 =l add %.3855, 472 + %.4140 =l copy $g_265 + %.4141 =l mul 48, 1 + %.4142 =l add %.4140, %.4141 + %.4143 =l copy %.4142 + storel %.4143, %.4139 + %.4144 =l add %.3855, 480 + %.4145 =l copy $g_265 + %.4146 =l mul 48, 1 + %.4147 =l add %.4145, %.4146 + %.4148 =l copy %.4147 + storel %.4148, %.4144 + %.4149 =l add %.3855, 488 + %.4150 =l copy %.10 + %.4151 =l mul 16, 1 + %.4152 =l add %.4150, %.4151 + %.4153 =l copy %.4152 + storel %.4153, %.4149 + %.4154 =l add %.3855, 496 + %.4155 =l copy $g_265 + %.4156 =l mul 48, 1 + %.4157 =l add %.4155, %.4156 + %.4158 =l copy %.4157 + storel %.4158, %.4154 + %.4159 =l add %.3855, 504 + %.4160 =l copy $g_265 + %.4161 =l mul 48, 1 + %.4162 =l add %.4160, %.4161 + %.4163 =l copy %.4162 + storel %.4163, %.4159 + %.4164 =l add %.3855, 512 + %.4165 =l copy %.10 + %.4166 =l mul 16, 1 + %.4167 =l add %.4165, %.4166 + %.4168 =l copy %.4167 + storel %.4168, %.4164 + %.4169 =l add %.3855, 520 + %.4170 =l copy $g_265 + %.4171 =l mul 48, 1 + %.4172 =l add %.4170, %.4171 + %.4173 =l copy %.4172 + storel %.4173, %.4169 + %.4174 =l add %.3855, 528 + %.4175 =l copy $g_265 + %.4176 =l mul 48, 1 + %.4177 =l add %.4175, %.4176 + %.4178 =l copy %.4177 + storel %.4178, %.4174 + %.4179 =l add %.3855, 536 + %.4180 =l copy %.10 + %.4181 =l mul 16, 1 + %.4182 =l add %.4180, %.4181 + %.4183 =l 
copy %.4182 + storel %.4183, %.4179 + %.4184 =l add %.3855, 544 + %.4185 =l copy $g_265 + %.4186 =l mul 48, 1 + %.4187 =l add %.4185, %.4186 + %.4188 =l copy %.4187 + storel %.4188, %.4184 + %.4189 =l add %.3855, 552 + %.4190 =l copy $g_265 + %.4191 =l mul 48, 1 + %.4192 =l add %.4190, %.4191 + %.4193 =l copy %.4192 + storel %.4193, %.4189 + %.4194 =l add %.3855, 560 + %.4195 =l copy %.10 + %.4196 =l mul 16, 1 + %.4197 =l add %.4195, %.4196 + %.4198 =l copy %.4197 + storel %.4198, %.4194 + %.4199 =l add %.3855, 568 + %.4200 =l copy $g_265 + %.4201 =l mul 48, 1 + %.4202 =l add %.4200, %.4201 + %.4203 =l copy %.4202 + storel %.4203, %.4199 + %.4205 =l add %.4204, 0 + %.4206 =w copy 360956765 + storew %.4206, %.4205 + %.4208 =l add %.4207, 0 + %.4209 =w copy 2328868295 + storew %.4209, %.4208 + %.4211 =l add %.4210, 0 + storel $g_88, %.4211 + %.4215 =w copy 0 + storew %.4215, $g_1018 +@for_cond.1211 + %.4216 =w loaduw $g_1018 + %.4217 =w copy 9 + %.4218 =w culew %.4216, %.4217 + jnz %.4218, @for_body.1212, @for_join.1214 +@for_body.1212 + storew 0, %.4220 +@for_cond.1215 + %.4221 =w loadsw %.4220 + %.4222 =w csltw %.4221, 5 + jnz %.4222, @for_body.1216, @for_join.1218 +@for_body.1216 + %.4223 =l copy $g_185 + %.4224 =l mul 44, 1 + %.4225 =l add %.4223, %.4224 + %.4226 =l copy %.4225 + %.4227 =w loadsw %.4220 + %.4228 =l extsw %.4227 + %.4229 =l mul %.4228, 8 + %.4230 =l add %.4219, %.4229 + storel %.4226, %.4230 +@for_cont.1217 + %.4231 =w loadsw %.4220 + %.4232 =w add %.4231, 1 + storew %.4232, %.4220 + jmp @for_cond.1215 +@for_join.1218 + %.4233 =w loaduw %.3829 + %.4234 =w sub %.4233, 1 + storew %.4234, %.3829 + %.4235 =l extsw 0 + %.4236 =l copy %.4235 + storel %.4236, $g_1123 +@for_cont.1213 + %.4237 =w loaduw $g_1018 + %.4238 =w add %.4237, 1 + storew %.4238, $g_1018 + jmp @for_cond.1211 +@for_join.1214 + %.4239 =w loadsw %.2 + %.4240 =l extsw %.4239 + %.4241 =w ceql %.4240, 7 + %.4242 =l loadl %.4 + storew %.4241, %.4242 + %.4243 =w loaduw %.4207 + %.4244 =w 
add %.4243, 1 + storew %.4244, %.4207 + %.4245 =l extsw 1 + %.4246 =l mul %.4245, 8 + %.4247 =l add $g_172, %.4246 + %.4248 =l loadl %.4210 + storel %.4247, %.4248 +@for_cont.1209 + %.4249 =l copy $g_518 + %.4250 =l mul 0, 1 + %.4251 =l add %.4249, %.4250 + %.4252 =l copy %.4251 + %.4253 =w loadub %.4252 + %.4254 =w copy 3 + %.4255 =w call $safe_add_func_uint8_t_u_u(w %.4253, w %.4254) + %.4256 =l copy $g_518 + %.4257 =l mul 0, 1 + %.4258 =l add %.4256, %.4257 + %.4259 =l copy %.4258 + storeb %.4255, %.4259 + jmp @for_cond.1207 +@for_join.1210 + %.4260 =l copy %.89 + %.4261 =l mul 4, 1 + %.4262 =l add %.4260, %.4261 + %.4263 =l copy %.4262 + %.4264 =w loaduw %.4263 + %.4265 =w copy %.4264 + %.4266 =w loadsw %.101 + %.4267 =w call $safe_sub_func_int32_t_s_s(w %.4265, w %.4266) + %.4268 =w cnel 1, 0 + jnz %.4268, @logic_join.1220, @logic_right.1219 +@logic_right.1219 + %.4269 =w loadsw %.123 + %.4270 =w cnew %.4269, 0 +@logic_join.1220 + %.4271 =w phi @for_join.1210 %.4268, @logic_right.1219 %.4270 + %.4272 =l loadl $g_88 + %.4273 =l loadl %.4272 + %.4274 =l extsw 0 + %.4275 =w cnel %.4273, %.4274 + %.4276 =w loadsw %.2 + %.4277 =l copy %.10 + %.4278 =l mul 8, 1 + %.4279 =l add %.4277, %.4278 + %.4280 =l copy %.4279 + %.4281 =l loadl %.4280 + %.4282 =w copy %.4281 + %.4283 =l copy %.133 + %.4284 =l mul 0, 1 + %.4285 =l add %.4283, %.4284 + %.4286 =l copy %.4285 + storew %.4282, %.4286 + %.4287 =w copy %.4282 + %.4288 =l loadl %.155 + storeh %.4287, %.4288 + %.4289 =w copy 41250 + %.4290 =w call $safe_mul_func_int16_t_s_s(w %.4287, w %.4289) + %.4291 =w copy %.4290 + %.4292 =w loadsh %.161 + %.4293 =w copy %.4292 + %.4294 =w call $safe_mul_func_uint16_t_u_u(w %.4291, w %.4293) + %.4295 =w extuh %.4294 + %.4296 =w loadsb %.5 + %.4297 =w extsb %.4296 + %.4298 =w csgew %.4295, %.4297 + %.4299 =w copy %.4298 + %.4300 =w loadsh %.161 + %.4301 =w extsh %.4300 + %.4302 =w call $safe_rshift_func_uint8_t_u_u(w %.4299, w %.4301) + %.4303 =w extub %.4302 + %.4304 =w cslew 
%.4276, %.4303 + %.4305 =w copy %.4304 + %.4306 =l copy %.89 + %.4307 =l mul 0, 1 + %.4308 =l add %.4306, %.4307 + %.4309 =l copy %.4308 + %.4310 =w loadsw %.4309 + %.4311 =w call $safe_lshift_func_uint8_t_u_s(w %.4305, w %.4310) + %.4312 =w extub %.4311 + %.4313 =l extsw 2 + %.4314 =l mul %.4313, 4 + %.4315 =l add %.164, %.4314 + %.4316 =w loaduw %.4315 + %.4317 =w copy %.4316 + %.4318 =w call $safe_sub_func_int16_t_s_s(w %.4312, w %.4317) + %.4319 =w extsh %.4318 + %.4320 =w xor %.4275, %.4319 + %.4321 =w copy %.4320 + %.4322 =l copy %.89 + %.4323 =l mul 12, 1 + %.4324 =l add %.4322, %.4323 + %.4325 =l copy %.4324 + %.4326 =w loadsw %.4325 + %.4327 =w copy %.4326 + %.4328 =w call $safe_add_func_int8_t_s_s(w %.4321, w %.4327) + %.4329 =l extsw 0 + %.4330 =w cnel %.147, %.4329 + %.4331 =w copy %.4330 + %.4332 =w loaduh %.110 + %.4333 =w copy %.4332 + %.4334 =w call $safe_mul_func_int16_t_s_s(w %.4331, w %.4333) + %.4335 =w extsh %.4334 + %.4336 =w csgtw %.4271, %.4335 + %.4337 =w and %.4267, %.4336 + %.4338 =l extsw %.4337 + %.4339 =w loadsw %.2 + %.4340 =l extsw %.4339 + %.4341 =l call $safe_add_func_uint64_t_u_u(l %.4338, l %.4340) + %.4342 =l extsw 0 + %.4343 =l extsw 0 + %.4344 =w cnel %.4342, %.4343 + %.4345 =w copy %.4344 + %.4346 =l extsw 0 + %.4347 =l mul %.4346, 8 + %.4348 =l add %.109, %.4347 + %.4349 =l loadl %.4348 + %.4350 =w copy %.4349 + %.4351 =w call $safe_rshift_func_uint8_t_u_u(w %.4345, w %.4350) + %.4352 =w extub %.4351 + %.4353 =l loadl $g_23 + %.4354 =w loadsw %.4353 + %.4355 =w or %.4352, %.4354 + %.4356 =l extsw %.4355 + %.4357 =w ceql %.4356, 233 + %.4358 =l loadl %.4 + %.4359 =w loadsw %.4358 + %.4360 =l extsw %.4359 + %.4361 =w cnel %.4360, 877431633 + %.4362 =l loadl $g_173 + %.4363 =w loadsw %.4362 + %.4364 =w ceqw %.4361, %.4363 + %.4365 =w loadsw %.2 + %.4366 =w copy %.4365 + %.4367 =w loadsw %.101 + %.4368 =w copy %.4367 + %.4369 =w call $safe_mod_func_uint16_t_u_u(w %.4366, w %.4368) + %.4370 =w extuh %.4369 + %.4371 =l loadl $g_38 
+ %.4372 =l loadl %.4371 + storew %.4370, %.4372 + %.4373 =w sub 0, 6 + %.4374 =w copy %.4373 + storeb %.4374, $g_566 +@for_cond.1221 + %.4375 =w loadub $g_566 + %.4376 =w extub %.4375 + %.4377 =w csgew %.4376, 48 + jnz %.4377, @for_body.1222, @for_join.1224 +@for_body.1222 + %.4379 =l add %.4378, 0 + %.4380 =l copy $g_185 + %.4381 =l mul 44, 1 + %.4382 =l add %.4380, %.4381 + %.4383 =l copy %.4382 + storel %.4383, %.4379 + %.4385 =l add %.4384, 0 + storel %.123, %.4385 + %.4388 =l add %.4387, 0 + %.4389 =w copy 460368954 + storew %.4389, %.4388 + %.4391 =l add %.4390, 0 + %.4392 =w copy 9 + storew %.4392, %.4391 + %.4394 =l add %.4393, 0 + %.4395 =w copy 35248 + storeh %.4395, %.4394 + storew 0, %.4396 +@for_cond.1225 + %.4398 =w loadsw %.4396 + %.4399 =w csltw %.4398, 1 + jnz %.4399, @for_body.1226, @for_join.1228 +@for_body.1226 + storew 0, %.4397 +@for_cond.1229 + %.4400 =w loadsw %.4397 + %.4401 =w csltw %.4400, 1 + jnz %.4401, @for_body.1230, @for_join.1232 +@for_body.1230 + %.4402 =l copy %.10 + %.4403 =l mul 16, 1 + %.4404 =l add %.4402, %.4403 + %.4405 =l copy %.4404 + %.4406 =w loadsw %.4396 + %.4407 =l extsw %.4406 + %.4408 =l mul %.4407, 8 + %.4409 =l add %.4386, %.4408 + %.4410 =w loadsw %.4397 + %.4411 =l extsw %.4410 + %.4412 =l mul %.4411, 8 + %.4413 =l add %.4409, %.4412 + storel %.4405, %.4413 +@for_cont.1231 + %.4414 =w loadsw %.4397 + %.4415 =w add %.4414, 1 + storew %.4415, %.4397 + jmp @for_cond.1229 +@for_join.1232 +@for_cont.1227 + %.4416 =w loadsw %.4396 + %.4417 =w add %.4416, 1 + storew %.4417, %.4396 + jmp @for_cond.1225 +@for_join.1228 + %.4418 =l copy %.10 + %.4419 =l mul 36, 1 + %.4420 =l add %.4418, %.4419 + %.4421 =l copy %.4420 + %.4422 =w loaduw %.4421 + %.4423 =w cnew %.4422, 0 + jnz %.4423, @if_true.1233, @if_false.1234 +@if_true.1233 + jmp @lbl_1172.1206 +@if_false.1234 + %.4424 =w loaduh %.4393 + %.4425 =w sub %.4424, 1 + storeh %.4425, %.4393 +@for_cont.1223 + %.4426 =w loadub $g_566 + %.4427 =w add %.4426, 1 + storeb %.4427, 
$g_566 + jmp @for_cond.1221 +@for_join.1224 + %.4428 =w loadsw %.126 + %.4429 =w copy %.4428 + ret %.4429 +} +function l $func_33(l %.1, w %.3, w %.5) { +@start.1235 + %.2 =l alloc8 8 + storel %.1, %.2 + %.4 =l alloc4 4 + storew %.3, %.4 + %.6 =l alloc4 1 + storeb %.5, %.6 + %.7 =l alloc8 1920 + %.248 =l alloc8 8 + %.250 =l alloc4 2880 + %.2005 =l alloc8 8 + %.2010 =l alloc4 4 + %.2013 =l alloc8 72 + %.2023 =l alloc4 1 + %.2026 =l alloc8 8 + %.2030 =l alloc4 4 + %.2033 =l alloc4 4 + %.2036 =l alloc8 8 + %.2042 =l alloc4 1 + %.2045 =l alloc8 8 + %.2047 =l alloc4 4 + %.2048 =l alloc4 4 + %.2049 =l alloc4 4 + %.2122 =l alloc4 4 + %.2125 =l alloc4 4 + %.2128 =l alloc8 8 + %.2130 =l alloc8 8 + %.2132 =l alloc8 8 + %.2143 =l alloc8 8 + %.2145 =l alloc8 16 + %.2146 =l alloc4 4 + %.2149 =l alloc4 4 + %.2152 =l alloc4 4 + %.2153 =l alloc8 720 + %.2829 =l alloc4 1 + %.2832 =l alloc4 1 + %.2835 =l alloc4 4 + %.2840 =l alloc4 4 + %.2841 =l alloc4 4 + %.2842 =l alloc4 4 + %.2872 =l alloc4 1 + %.2875 =l alloc8 8 + %.2886 =l alloc4 4 + %.2891 =l alloc8 8 + %.2893 =l alloc8 8 + %.2895 =l alloc8 8 + %.2897 =l alloc8 216 + %.2935 =l alloc4 240 + %.3464 =l alloc8 8 + %.3468 =l alloc8 8 + %.3470 =l alloc4 4 + %.3471 =l alloc4 4 + %.3472 =l alloc4 4 + %.3496 =l alloc8 8 + %.3498 =l alloc4 324 + %.3699 =l alloc8 320 + %.3840 =l alloc8 8 + %.3844 =l alloc8 8 + %.3848 =l alloc8 8 + %.3850 =l alloc8 64 + %.3859 =l alloc8 8 + %.3861 =l alloc8 8 + %.3865 =l alloc8 8 + %.3869 =l alloc4 4 + %.3872 =l alloc8 8 + %.3878 =l alloc8 8 + %.3880 =l alloc4 1 + %.3883 =l alloc8 8 + %.3885 =l alloc4 4 + %.3888 =l alloc4 4 + %.3889 =l alloc4 4 + %.3890 =l alloc4 4 + %.4031 =l alloc8 48 + %.4038 =l alloc8 8 + %.4041 =l alloc8 8 + %.4043 =l alloc8 8 + %.4054 =l alloc8 48 + %.4069 =l alloc4 4 + %.4070 =l alloc4 4 + %.4113 =l alloc8 8 + %.4115 =l alloc8 8 + %.4117 =l alloc4 4 + %.4120 =l alloc8 8 + %.4126 =l alloc8 8 + %.4273 =l alloc8 8 + %.4305 =l alloc8 8 + %.4404 =l alloc8 8 + %.4406 =l alloc8 8 + %.4410 
=l alloc8 8 + %.4412 =l alloc8 8 + %.4427 =l alloc4 4 + %.4432 =l alloc8 16 + %.4433 =l alloc8 8 + %.4436 =l alloc8 8 + %.4439 =l alloc4 4 + %.4532 =l alloc4 4 + %.4535 =l alloc8 48 + %.4548 =l alloc4 360 + %.4763 =l alloc8 8 + %.4774 =l alloc4 4 + %.4775 =l alloc4 4 + %.4776 =l alloc4 4 + %.4953 =l alloc8 8 + %.4955 =l alloc4 4 + %.4958 =l alloc4 360 + %.5147 =l alloc8 8 + %.5149 =l alloc8 8 + %.5153 =l alloc4 4 + %.5154 =l alloc4 4 + %.5444 =l alloc4 4 + %.5449 =l alloc4 2 + %.5452 =l alloc8 8 + %.5610 =l alloc4 2 + %.5613 =l alloc8 8 + %.5617 =l alloc8 8 + %.5621 =l alloc8 56 + %.5629 =l alloc8 8 + %.5631 =l alloc8 80 + %.5732 =l alloc4 4 + %.5735 =l alloc4 20 + %.5749 =l alloc8 160 + %.5890 =l alloc8 8 + %.5892 =l alloc4 4 + %.5895 =l alloc4 4 + %.5896 =l alloc4 4 + %.5982 =l alloc8 8 + %.5993 =l alloc8 8 + %.5995 =l alloc4 12 + %.5996 =l alloc4 1 + %.5999 =l alloc8 72 + %.6126 =l alloc8 8 + %.6132 =l alloc4 4 + %.6188 =l alloc8 8 + %.6191 =l alloc8 8 + %.6197 =l alloc4 4 + %.6210 =l alloc4 4 + %.6299 =l alloc8 8 + %.6301 =l alloc8 8 + %.6303 =l alloc8 8 + %.6307 =l alloc8 8 + %.6309 =l alloc8 8 + %.6317 =l alloc4 84 + %.6368 =l alloc4 4 + %.6369 =l alloc4 4 + %.6433 =l alloc8 8 + %.6436 =l alloc4 4 + %.6439 =l alloc4 16 + %.6448 =l alloc4 4 + %.6519 =l alloc8 8 + %.6552 =l alloc4 4 + %.6679 =l alloc4 1 + %.6682 =l alloc8 8 + %.6683 =l alloc4 4 + %.6744 =l alloc8 8 + %.6750 =l alloc4 8 + %.6751 =l alloc4 4 + %.6754 =l alloc8 8 + %.6760 =l alloc4 4 + %.6779 =l alloc4 14 + %.6780 =l alloc8 56 + %.6805 =l alloc4 2 + %.6806 =l alloc4 4 + %.6807 =l alloc4 4 + %.6847 =l alloc4 2 + %.6850 =l alloc4 4 + %.6853 =l alloc4 4 + %.6993 =l alloc4 4 + %.6994 =l alloc4 4 + %.7110 =l alloc8 64 + %.7127 =l alloc8 8 + %.7142 =l alloc8 8 + %.7144 =l alloc8 8 + %.7147 =l alloc8 32 + %.7152 =l alloc4 4 + %.7155 =l alloc8 1680 + %.7825 =l alloc8 8 + %.7827 =l alloc4 4 + %.7828 =l alloc4 4 + %.7829 =l alloc4 4 + %.7900 =l alloc8 8 + %.7902 =l alloc4 4 + %.7905 =l alloc4 1 + %.7929 =l 
alloc8 48 + %.7958 =l alloc4 4 + %.7961 =l alloc4 4 + %.8018 =l alloc8 8 + %.8020 =l alloc8 256 + %.8065 =l alloc4 4 + %.8068 =l alloc4 4 + %.8069 =l alloc4 4 + %.8070 =l alloc4 4 + %.8075 =l alloc4 4 + %.8076 =l alloc4 4 + %.8077 =l alloc4 4 + %.8187 =l alloc8 8 + %.8189 =l alloc4 4 + %.8190 =l alloc4 4 + %.8191 =l alloc4 4 + %.8247 =l alloc4 20 +@body.1236 + %.8 =l add %.7, 0 + storel $g_24, %.8 + %.9 =l add %.7, 8 + storel $g_24, %.9 + %.10 =l add %.7, 16 + storel $g_24, %.10 + %.11 =l add %.7, 24 + storel $g_24, %.11 + %.12 =l add %.7, 32 + storel $g_24, %.12 + %.13 =l add %.7, 40 + storel $g_24, %.13 + %.14 =l add %.7, 48 + storel $g_24, %.14 + %.15 =l add %.7, 56 + storel $g_24, %.15 + %.16 =l add %.7, 64 + storel $g_24, %.16 + %.17 =l add %.7, 72 + storel $g_24, %.17 + %.18 =l add %.7, 80 + storel $g_24, %.18 + %.19 =l add %.7, 88 + storel $g_24, %.19 + %.20 =l add %.7, 96 + storel $g_24, %.20 + %.21 =l add %.7, 104 + storel $g_24, %.21 + %.22 =l add %.7, 112 + storel $g_24, %.22 + %.23 =l add %.7, 120 + storel $g_24, %.23 + %.24 =l add %.7, 128 + storel $g_24, %.24 + %.25 =l add %.7, 136 + storel $g_24, %.25 + %.26 =l add %.7, 144 + storel $g_24, %.26 + %.27 =l add %.7, 152 + storel $g_24, %.27 + %.28 =l add %.7, 160 + storel $g_24, %.28 + %.29 =l add %.7, 168 + storel $g_24, %.29 + %.30 =l add %.7, 176 + storel $g_24, %.30 + %.31 =l add %.7, 184 + storel $g_24, %.31 + %.32 =l add %.7, 192 + storel $g_24, %.32 + %.33 =l add %.7, 200 + storel $g_24, %.33 + %.34 =l add %.7, 208 + storel $g_24, %.34 + %.35 =l add %.7, 216 + storel $g_24, %.35 + %.36 =l add %.7, 224 + storel $g_24, %.36 + %.37 =l add %.7, 232 + storel $g_24, %.37 + %.38 =l add %.7, 240 + storel $g_24, %.38 + %.39 =l add %.7, 248 + storel $g_24, %.39 + %.40 =l add %.7, 256 + storel $g_24, %.40 + %.41 =l add %.7, 264 + storel $g_24, %.41 + %.42 =l add %.7, 272 + storel $g_24, %.42 + %.43 =l add %.7, 280 + storel $g_24, %.43 + %.44 =l add %.7, 288 + storel $g_24, %.44 + %.45 =l add %.7, 296 + 
storel $g_24, %.45 + %.46 =l add %.7, 304 + storel $g_24, %.46 + %.47 =l add %.7, 312 + storel $g_24, %.47 + %.48 =l add %.7, 320 + storel $g_24, %.48 + %.49 =l add %.7, 328 + storel $g_24, %.49 + %.50 =l add %.7, 336 + storel $g_24, %.50 + %.51 =l add %.7, 344 + storel $g_24, %.51 + %.52 =l add %.7, 352 + storel $g_24, %.52 + %.53 =l add %.7, 360 + storel $g_24, %.53 + %.54 =l add %.7, 368 + storel $g_24, %.54 + %.55 =l add %.7, 376 + storel $g_24, %.55 + %.56 =l add %.7, 384 + storel $g_24, %.56 + %.57 =l add %.7, 392 + storel $g_24, %.57 + %.58 =l add %.7, 400 + storel $g_24, %.58 + %.59 =l add %.7, 408 + storel $g_24, %.59 + %.60 =l add %.7, 416 + storel $g_24, %.60 + %.61 =l add %.7, 424 + storel $g_24, %.61 + %.62 =l add %.7, 432 + storel $g_24, %.62 + %.63 =l add %.7, 440 + storel $g_24, %.63 + %.64 =l add %.7, 448 + storel $g_24, %.64 + %.65 =l add %.7, 456 + storel $g_24, %.65 + %.66 =l add %.7, 464 + storel $g_24, %.66 + %.67 =l add %.7, 472 + storel $g_24, %.67 + %.68 =l add %.7, 480 + storel $g_24, %.68 + %.69 =l add %.7, 488 + storel $g_24, %.69 + %.70 =l add %.7, 496 + storel $g_24, %.70 + %.71 =l add %.7, 504 + storel $g_24, %.71 + %.72 =l add %.7, 512 + storel $g_24, %.72 + %.73 =l add %.7, 520 + storel $g_24, %.73 + %.74 =l add %.7, 528 + storel $g_24, %.74 + %.75 =l add %.7, 536 + storel $g_24, %.75 + %.76 =l add %.7, 544 + storel $g_24, %.76 + %.77 =l add %.7, 552 + storel $g_24, %.77 + %.78 =l add %.7, 560 + storel $g_24, %.78 + %.79 =l add %.7, 568 + storel $g_24, %.79 + %.80 =l add %.7, 576 + storel $g_24, %.80 + %.81 =l add %.7, 584 + storel $g_24, %.81 + %.82 =l add %.7, 592 + storel $g_24, %.82 + %.83 =l add %.7, 600 + storel $g_24, %.83 + %.84 =l add %.7, 608 + storel $g_24, %.84 + %.85 =l add %.7, 616 + storel $g_24, %.85 + %.86 =l add %.7, 624 + storel $g_24, %.86 + %.87 =l add %.7, 632 + storel $g_24, %.87 + %.88 =l add %.7, 640 + storel $g_24, %.88 + %.89 =l add %.7, 648 + storel $g_24, %.89 + %.90 =l add %.7, 656 + storel $g_24, %.90 
+ %.91 =l add %.7, 664 + storel $g_24, %.91 + %.92 =l add %.7, 672 + storel $g_24, %.92 + %.93 =l add %.7, 680 + storel $g_24, %.93 + %.94 =l add %.7, 688 + storel $g_24, %.94 + %.95 =l add %.7, 696 + storel $g_24, %.95 + %.96 =l add %.7, 704 + storel $g_24, %.96 + %.97 =l add %.7, 712 + storel $g_24, %.97 + %.98 =l add %.7, 720 + storel $g_24, %.98 + %.99 =l add %.7, 728 + storel $g_24, %.99 + %.100 =l add %.7, 736 + storel $g_24, %.100 + %.101 =l add %.7, 744 + storel $g_24, %.101 + %.102 =l add %.7, 752 + storel $g_24, %.102 + %.103 =l add %.7, 760 + storel $g_24, %.103 + %.104 =l add %.7, 768 + storel $g_24, %.104 + %.105 =l add %.7, 776 + storel $g_24, %.105 + %.106 =l add %.7, 784 + storel $g_24, %.106 + %.107 =l add %.7, 792 + storel $g_24, %.107 + %.108 =l add %.7, 800 + storel $g_24, %.108 + %.109 =l add %.7, 808 + storel $g_24, %.109 + %.110 =l add %.7, 816 + storel $g_24, %.110 + %.111 =l add %.7, 824 + storel $g_24, %.111 + %.112 =l add %.7, 832 + storel $g_24, %.112 + %.113 =l add %.7, 840 + storel $g_24, %.113 + %.114 =l add %.7, 848 + storel $g_24, %.114 + %.115 =l add %.7, 856 + storel $g_24, %.115 + %.116 =l add %.7, 864 + storel $g_24, %.116 + %.117 =l add %.7, 872 + storel $g_24, %.117 + %.118 =l add %.7, 880 + storel $g_24, %.118 + %.119 =l add %.7, 888 + storel $g_24, %.119 + %.120 =l add %.7, 896 + storel $g_24, %.120 + %.121 =l add %.7, 904 + storel $g_24, %.121 + %.122 =l add %.7, 912 + storel $g_24, %.122 + %.123 =l add %.7, 920 + storel $g_24, %.123 + %.124 =l add %.7, 928 + storel $g_24, %.124 + %.125 =l add %.7, 936 + storel $g_24, %.125 + %.126 =l add %.7, 944 + storel $g_24, %.126 + %.127 =l add %.7, 952 + storel $g_24, %.127 + %.128 =l add %.7, 960 + storel $g_24, %.128 + %.129 =l add %.7, 968 + storel $g_24, %.129 + %.130 =l add %.7, 976 + storel $g_24, %.130 + %.131 =l add %.7, 984 + storel $g_24, %.131 + %.132 =l add %.7, 992 + storel $g_24, %.132 + %.133 =l add %.7, 1000 + storel $g_24, %.133 + %.134 =l add %.7, 1008 + storel 
$g_24, %.134 + %.135 =l add %.7, 1016 + storel $g_24, %.135 + %.136 =l add %.7, 1024 + storel $g_24, %.136 + %.137 =l add %.7, 1032 + storel $g_24, %.137 + %.138 =l add %.7, 1040 + storel $g_24, %.138 + %.139 =l add %.7, 1048 + storel $g_24, %.139 + %.140 =l add %.7, 1056 + storel $g_24, %.140 + %.141 =l add %.7, 1064 + storel $g_24, %.141 + %.142 =l add %.7, 1072 + storel $g_24, %.142 + %.143 =l add %.7, 1080 + storel $g_24, %.143 + %.144 =l add %.7, 1088 + storel $g_24, %.144 + %.145 =l add %.7, 1096 + storel $g_24, %.145 + %.146 =l add %.7, 1104 + storel $g_24, %.146 + %.147 =l add %.7, 1112 + storel $g_24, %.147 + %.148 =l add %.7, 1120 + storel $g_24, %.148 + %.149 =l add %.7, 1128 + storel $g_24, %.149 + %.150 =l add %.7, 1136 + storel $g_24, %.150 + %.151 =l add %.7, 1144 + storel $g_24, %.151 + %.152 =l add %.7, 1152 + storel $g_24, %.152 + %.153 =l add %.7, 1160 + storel $g_24, %.153 + %.154 =l add %.7, 1168 + storel $g_24, %.154 + %.155 =l add %.7, 1176 + storel $g_24, %.155 + %.156 =l add %.7, 1184 + storel $g_24, %.156 + %.157 =l add %.7, 1192 + storel $g_24, %.157 + %.158 =l add %.7, 1200 + storel $g_24, %.158 + %.159 =l add %.7, 1208 + storel $g_24, %.159 + %.160 =l add %.7, 1216 + storel $g_24, %.160 + %.161 =l add %.7, 1224 + storel $g_24, %.161 + %.162 =l add %.7, 1232 + storel $g_24, %.162 + %.163 =l add %.7, 1240 + storel $g_24, %.163 + %.164 =l add %.7, 1248 + storel $g_24, %.164 + %.165 =l add %.7, 1256 + storel $g_24, %.165 + %.166 =l add %.7, 1264 + storel $g_24, %.166 + %.167 =l add %.7, 1272 + storel $g_24, %.167 + %.168 =l add %.7, 1280 + storel $g_24, %.168 + %.169 =l add %.7, 1288 + storel $g_24, %.169 + %.170 =l add %.7, 1296 + storel $g_24, %.170 + %.171 =l add %.7, 1304 + storel $g_24, %.171 + %.172 =l add %.7, 1312 + storel $g_24, %.172 + %.173 =l add %.7, 1320 + storel $g_24, %.173 + %.174 =l add %.7, 1328 + storel $g_24, %.174 + %.175 =l add %.7, 1336 + storel $g_24, %.175 + %.176 =l add %.7, 1344 + storel $g_24, %.176 + %.177 =l 
add %.7, 1352 + storel $g_24, %.177 + %.178 =l add %.7, 1360 + storel $g_24, %.178 + %.179 =l add %.7, 1368 + storel $g_24, %.179 + %.180 =l add %.7, 1376 + storel $g_24, %.180 + %.181 =l add %.7, 1384 + storel $g_24, %.181 + %.182 =l add %.7, 1392 + storel $g_24, %.182 + %.183 =l add %.7, 1400 + storel $g_24, %.183 + %.184 =l add %.7, 1408 + storel $g_24, %.184 + %.185 =l add %.7, 1416 + storel $g_24, %.185 + %.186 =l add %.7, 1424 + storel $g_24, %.186 + %.187 =l add %.7, 1432 + storel $g_24, %.187 + %.188 =l add %.7, 1440 + storel $g_24, %.188 + %.189 =l add %.7, 1448 + storel $g_24, %.189 + %.190 =l add %.7, 1456 + storel $g_24, %.190 + %.191 =l add %.7, 1464 + storel $g_24, %.191 + %.192 =l add %.7, 1472 + storel $g_24, %.192 + %.193 =l add %.7, 1480 + storel $g_24, %.193 + %.194 =l add %.7, 1488 + storel $g_24, %.194 + %.195 =l add %.7, 1496 + storel $g_24, %.195 + %.196 =l add %.7, 1504 + storel $g_24, %.196 + %.197 =l add %.7, 1512 + storel $g_24, %.197 + %.198 =l add %.7, 1520 + storel $g_24, %.198 + %.199 =l add %.7, 1528 + storel $g_24, %.199 + %.200 =l add %.7, 1536 + storel $g_24, %.200 + %.201 =l add %.7, 1544 + storel $g_24, %.201 + %.202 =l add %.7, 1552 + storel $g_24, %.202 + %.203 =l add %.7, 1560 + storel $g_24, %.203 + %.204 =l add %.7, 1568 + storel $g_24, %.204 + %.205 =l add %.7, 1576 + storel $g_24, %.205 + %.206 =l add %.7, 1584 + storel $g_24, %.206 + %.207 =l add %.7, 1592 + storel $g_24, %.207 + %.208 =l add %.7, 1600 + storel $g_24, %.208 + %.209 =l add %.7, 1608 + storel $g_24, %.209 + %.210 =l add %.7, 1616 + storel $g_24, %.210 + %.211 =l add %.7, 1624 + storel $g_24, %.211 + %.212 =l add %.7, 1632 + storel $g_24, %.212 + %.213 =l add %.7, 1640 + storel $g_24, %.213 + %.214 =l add %.7, 1648 + storel $g_24, %.214 + %.215 =l add %.7, 1656 + storel $g_24, %.215 + %.216 =l add %.7, 1664 + storel $g_24, %.216 + %.217 =l add %.7, 1672 + storel $g_24, %.217 + %.218 =l add %.7, 1680 + storel $g_24, %.218 + %.219 =l add %.7, 1688 + storel 
$g_24, %.219 + %.220 =l add %.7, 1696 + storel $g_24, %.220 + %.221 =l add %.7, 1704 + storel $g_24, %.221 + %.222 =l add %.7, 1712 + storel $g_24, %.222 + %.223 =l add %.7, 1720 + storel $g_24, %.223 + %.224 =l add %.7, 1728 + storel $g_24, %.224 + %.225 =l add %.7, 1736 + storel $g_24, %.225 + %.226 =l add %.7, 1744 + storel $g_24, %.226 + %.227 =l add %.7, 1752 + storel $g_24, %.227 + %.228 =l add %.7, 1760 + storel $g_24, %.228 + %.229 =l add %.7, 1768 + storel $g_24, %.229 + %.230 =l add %.7, 1776 + storel $g_24, %.230 + %.231 =l add %.7, 1784 + storel $g_24, %.231 + %.232 =l add %.7, 1792 + storel $g_24, %.232 + %.233 =l add %.7, 1800 + storel $g_24, %.233 + %.234 =l add %.7, 1808 + storel $g_24, %.234 + %.235 =l add %.7, 1816 + storel $g_24, %.235 + %.236 =l add %.7, 1824 + storel $g_24, %.236 + %.237 =l add %.7, 1832 + storel $g_24, %.237 + %.238 =l add %.7, 1840 + storel $g_24, %.238 + %.239 =l add %.7, 1848 + storel $g_24, %.239 + %.240 =l add %.7, 1856 + storel $g_24, %.240 + %.241 =l add %.7, 1864 + storel $g_24, %.241 + %.242 =l add %.7, 1872 + storel $g_24, %.242 + %.243 =l add %.7, 1880 + storel $g_24, %.243 + %.244 =l add %.7, 1888 + storel $g_24, %.244 + %.245 =l add %.7, 1896 + storel $g_24, %.245 + %.246 =l add %.7, 1904 + storel $g_24, %.246 + %.247 =l add %.7, 1912 + storel $g_24, %.247 + %.249 =l add %.248, 0 + storel $g_46, %.249 + %.251 =l add %.250, 0 + %.252 =w copy 1083426737 + storew %.252, %.251 + %.253 =l add %.250, 4 + %.254 =w copy 0 + storew %.254, %.253 + %.255 =l add %.250, 8 + %.256 =l extsw 0 + %.257 =l sub %.256, 1 + %.258 =w copy %.257 + storeh %.258, %.255 + %.259 =l add %.250, 10 + storeh 0, %.259 + %.260 =l add %.250, 12 + %.261 =l extsw 0 + %.262 =l sub %.261, 1 + %.263 =w copy %.262 + storew %.263, %.260 + %.264 =l add %.250, 16 + %.265 =w copy 4055616320 + storew %.265, %.264 + %.266 =l add %.250, 20 + %.267 =l extsw 0 + %.268 =l sub %.267, 1 + %.269 =w copy %.268 + storew %.269, %.266 + %.270 =l add %.250, 24 + %.271 =w 
copy 2 + storew %.271, %.270 + %.272 =l add %.250, 28 + %.273 =w copy 1 + storeh %.273, %.272 + %.274 =l add %.250, 30 + storeh 0, %.274 + %.275 =l add %.250, 32 + %.276 =w copy 2194741943 + storew %.276, %.275 + %.277 =l add %.250, 36 + %.278 =w copy 18446744073709551608 + storew %.278, %.277 + %.279 =l add %.250, 40 + %.280 =w copy 1 + storew %.280, %.279 + %.281 =l add %.250, 44 + %.282 =w copy 0 + storew %.282, %.281 + %.283 =l add %.250, 48 + %.284 =w copy 1167 + storeh %.284, %.283 + %.285 =l add %.250, 50 + storeh 0, %.285 + %.286 =l add %.250, 52 + %.287 =w copy 1072189932 + storew %.287, %.286 + %.288 =l add %.250, 56 + %.289 =w copy 18446744073709551609 + storew %.289, %.288 + %.290 =l add %.250, 60 + %.291 =w copy 669812072 + storew %.291, %.290 + %.292 =l add %.250, 64 + %.293 =w copy 891221781 + storew %.293, %.292 + %.294 =l add %.250, 68 + %.295 =w copy 37985 + storeh %.295, %.294 + %.296 =l add %.250, 70 + storeh 0, %.296 + %.297 =l add %.250, 72 + %.298 =w copy 8 + storew %.298, %.297 + %.299 =l add %.250, 76 + %.300 =w copy 2421504469 + storew %.300, %.299 + %.301 =l add %.250, 80 + %.302 =w copy 669812072 + storew %.302, %.301 + %.303 =l add %.250, 84 + %.304 =w copy 891221781 + storew %.304, %.303 + %.305 =l add %.250, 88 + %.306 =w copy 37985 + storeh %.306, %.305 + %.307 =l add %.250, 90 + storeh 0, %.307 + %.308 =l add %.250, 92 + %.309 =w copy 8 + storew %.309, %.308 + %.310 =l add %.250, 96 + %.311 =w copy 2421504469 + storew %.311, %.310 + %.312 =l add %.250, 100 + %.313 =w copy 1 + storew %.313, %.312 + %.314 =l add %.250, 104 + %.315 =w copy 0 + storew %.315, %.314 + %.316 =l add %.250, 108 + %.317 =w copy 1167 + storeh %.317, %.316 + %.318 =l add %.250, 110 + storeh 0, %.318 + %.319 =l add %.250, 112 + %.320 =w copy 1072189932 + storew %.320, %.319 + %.321 =l add %.250, 116 + %.322 =w copy 18446744073709551609 + storew %.322, %.321 + %.323 =l add %.250, 120 + %.324 =w copy 1691421598 + storew %.324, %.323 + %.325 =l add %.250, 124 + 
%.326 =w copy 2686270919 + storew %.326, %.325 + %.327 =l add %.250, 128 + %.328 =l extsw 0 + %.329 =l sub %.328, 6 + %.330 =w copy %.329 + storeh %.330, %.327 + %.331 =l add %.250, 130 + storeh 0, %.331 + %.332 =l add %.250, 132 + %.333 =w copy 3658217481 + storew %.333, %.332 + %.334 =l add %.250, 136 + %.335 =w copy 1 + storew %.335, %.334 + %.336 =l add %.250, 140 + %.337 =w copy 1691421598 + storew %.337, %.336 + %.338 =l add %.250, 144 + %.339 =w copy 2686270919 + storew %.339, %.338 + %.340 =l add %.250, 148 + %.341 =l extsw 0 + %.342 =l sub %.341, 6 + %.343 =w copy %.342 + storeh %.343, %.340 + %.344 =l add %.250, 150 + storeh 0, %.344 + %.345 =l add %.250, 152 + %.346 =w copy 3658217481 + storew %.346, %.345 + %.347 =l add %.250, 156 + %.348 =w copy 1 + storew %.348, %.347 + %.349 =l add %.250, 160 + %.350 =w copy 0 + storew %.350, %.349 + %.351 =l add %.250, 164 + %.352 =w copy 3828594409 + storew %.352, %.351 + %.353 =l add %.250, 168 + %.354 =w copy 23810 + storeh %.354, %.353 + %.355 =l add %.250, 170 + storeh 0, %.355 + %.356 =l add %.250, 172 + %.357 =w copy 2063202579 + storew %.357, %.356 + %.358 =l add %.250, 176 + %.359 =w copy 1748107750 + storew %.359, %.358 + %.360 =l add %.250, 180 + %.361 =l extsw 0 + %.362 =l sub %.361, 1 + %.363 =w copy %.362 + storew %.363, %.360 + %.364 =l add %.250, 184 + %.365 =w copy 70130414 + storew %.365, %.364 + %.366 =l add %.250, 188 + %.367 =w copy 4963 + storeh %.367, %.366 + %.368 =l add %.250, 190 + storeh 0, %.368 + %.369 =l add %.250, 192 + %.370 =l extsw 0 + %.371 =l sub %.370, 1 + %.372 =w copy %.371 + storew %.372, %.369 + %.373 =l add %.250, 196 + %.374 =w copy 1686473211 + storew %.374, %.373 + %.375 =l add %.250, 200 + %.376 =w copy 1 + storew %.376, %.375 + %.377 =l add %.250, 204 + %.378 =w copy 1 + storew %.378, %.377 + %.379 =l add %.250, 208 + %.380 =w copy 21621 + storeh %.380, %.379 + %.381 =l add %.250, 210 + storeh 0, %.381 + %.382 =l add %.250, 212 + %.383 =w copy 855572299 + storew %.383, 
%.382 + %.384 =l add %.250, 216 + %.385 =w copy 1 + storew %.385, %.384 + %.386 =l add %.250, 220 + %.387 =w copy 1 + storew %.387, %.386 + %.388 =l add %.250, 224 + %.389 =w copy 658990580 + storew %.389, %.388 + %.390 =l add %.250, 228 + %.391 =l extsw 0 + %.392 =l sub %.391, 2 + %.393 =w copy %.392 + storeh %.393, %.390 + %.394 =l add %.250, 230 + storeh 0, %.394 + %.395 =l add %.250, 232 + %.396 =w copy 376143518 + storew %.396, %.395 + %.397 =l add %.250, 236 + %.398 =w copy 2827151306 + storew %.398, %.397 + %.399 =l add %.250, 240 + %.400 =w copy 0 + storew %.400, %.399 + %.401 =l add %.250, 244 + %.402 =w copy 1 + storew %.402, %.401 + %.403 =l add %.250, 248 + %.404 =w copy 25431 + storeh %.404, %.403 + %.405 =l add %.250, 250 + storeh 0, %.405 + %.406 =l add %.250, 252 + %.407 =w copy 3588134414 + storew %.407, %.406 + %.408 =l add %.250, 256 + %.409 =w copy 8 + storew %.409, %.408 + %.410 =l add %.250, 260 + %.411 =w copy 1 + storew %.411, %.410 + %.412 =l add %.250, 264 + %.413 =w copy 1769489573 + storew %.413, %.412 + %.414 =l add %.250, 268 + %.415 =w copy 57523 + storeh %.415, %.414 + %.416 =l add %.250, 270 + storeh 0, %.416 + %.417 =l add %.250, 272 + %.418 =l extsw 0 + %.419 =l sub %.418, 1 + %.420 =w copy %.419 + storew %.420, %.417 + %.421 =l add %.250, 276 + %.422 =w copy 18446744073709551615 + storew %.422, %.421 + %.423 =l add %.250, 280 + %.424 =l extsw 0 + %.425 =l sub %.424, 1 + %.426 =w copy %.425 + storew %.426, %.423 + %.427 =l add %.250, 284 + %.428 =w copy 2953570971 + storew %.428, %.427 + %.429 =l add %.250, 288 + %.430 =w copy 55280 + storeh %.430, %.429 + %.431 =l add %.250, 290 + storeh 0, %.431 + %.432 =l add %.250, 292 + %.433 =w copy 890946016 + storew %.433, %.432 + %.434 =l add %.250, 296 + %.435 =w copy 1 + storew %.435, %.434 + %.436 =l add %.250, 300 + %.437 =l extsw 0 + %.438 =l sub %.437, 1 + %.439 =w copy %.438 + storew %.439, %.436 + %.440 =l add %.250, 304 + %.441 =w copy 18446744073709551615 + storew %.441, %.440 + 
%.442 =l add %.250, 308 + %.443 =l extsw 0 + %.444 =l sub %.443, 9 + %.445 =w copy %.444 + storeh %.445, %.442 + %.446 =l add %.250, 310 + storeh 0, %.446 + %.447 =l add %.250, 312 + %.448 =w copy 0 + storew %.448, %.447 + %.449 =l add %.250, 316 + %.450 =w copy 1638797083 + storew %.450, %.449 + %.451 =l add %.250, 320 + %.452 =w copy 465264126 + storew %.452, %.451 + %.453 =l add %.250, 324 + %.454 =w copy 1 + storew %.454, %.453 + %.455 =l add %.250, 328 + %.456 =l extsw 0 + %.457 =l sub %.456, 1 + %.458 =w copy %.457 + storeh %.458, %.455 + %.459 =l add %.250, 330 + storeh 0, %.459 + %.460 =l add %.250, 332 + %.461 =w copy 1950417622 + storew %.461, %.460 + %.462 =l add %.250, 336 + %.463 =w copy 976311328 + storew %.463, %.462 + %.464 =l add %.250, 340 + %.465 =w copy 0 + storew %.465, %.464 + %.466 =l add %.250, 344 + %.467 =w copy 3828594409 + storew %.467, %.466 + %.468 =l add %.250, 348 + %.469 =w copy 23810 + storeh %.469, %.468 + %.470 =l add %.250, 350 + storeh 0, %.470 + %.471 =l add %.250, 352 + %.472 =w copy 2063202579 + storew %.472, %.471 + %.473 =l add %.250, 356 + %.474 =w copy 1748107750 + storew %.474, %.473 + %.475 =l add %.250, 360 + %.476 =w copy 146340782 + storew %.476, %.475 + %.477 =l add %.250, 364 + %.478 =w copy 2063185036 + storew %.478, %.477 + %.479 =l add %.250, 368 + %.480 =l extsw 0 + %.481 =l sub %.480, 8 + %.482 =w copy %.481 + storeh %.482, %.479 + %.483 =l add %.250, 370 + storeh 0, %.483 + %.484 =l add %.250, 372 + %.485 =l extsw 0 + %.486 =l sub %.485, 8 + %.487 =w copy %.486 + storew %.487, %.484 + %.488 =l add %.250, 376 + %.489 =w copy 3813695288 + storew %.489, %.488 + %.490 =l add %.250, 380 + %.491 =w copy 0 + storew %.491, %.490 + %.492 =l add %.250, 384 + %.493 =w copy 1 + storew %.493, %.492 + %.494 =l add %.250, 388 + %.495 =w copy 25431 + storeh %.495, %.494 + %.496 =l add %.250, 390 + storeh 0, %.496 + %.497 =l add %.250, 392 + %.498 =w copy 3588134414 + storew %.498, %.497 + %.499 =l add %.250, 396 + %.500 =w 
copy 8 + storew %.500, %.499 + %.501 =l add %.250, 400 + %.502 =l extsw 0 + %.503 =l sub %.502, 1 + %.504 =w copy %.503 + storew %.504, %.501 + %.505 =l add %.250, 404 + %.506 =w copy 2953570971 + storew %.506, %.505 + %.507 =l add %.250, 408 + %.508 =w copy 55280 + storeh %.508, %.507 + %.509 =l add %.250, 410 + storeh 0, %.509 + %.510 =l add %.250, 412 + %.511 =w copy 890946016 + storew %.511, %.510 + %.512 =l add %.250, 416 + %.513 =w copy 1 + storew %.513, %.512 + %.514 =l add %.250, 420 + %.515 =w copy 1 + storew %.515, %.514 + %.516 =l add %.250, 424 + %.517 =w copy 18446744073709551615 + storew %.517, %.516 + %.518 =l add %.250, 428 + %.519 =w copy 35279 + storeh %.519, %.518 + %.520 =l add %.250, 430 + storeh 0, %.520 + %.521 =l add %.250, 432 + %.522 =l extsw 0 + %.523 =l sub %.522, 5 + %.524 =w copy %.523 + storew %.524, %.521 + %.525 =l add %.250, 436 + %.526 =w copy 1022186559 + storew %.526, %.525 + %.527 =l add %.250, 440 + %.528 =w copy 1691421598 + storew %.528, %.527 + %.529 =l add %.250, 444 + %.530 =w copy 2686270919 + storew %.530, %.529 + %.531 =l add %.250, 448 + %.532 =l extsw 0 + %.533 =l sub %.532, 6 + %.534 =w copy %.533 + storeh %.534, %.531 + %.535 =l add %.250, 450 + storeh 0, %.535 + %.536 =l add %.250, 452 + %.537 =w copy 3658217481 + storew %.537, %.536 + %.538 =l add %.250, 456 + %.539 =w copy 1 + storew %.539, %.538 + %.540 =l add %.250, 460 + %.541 =w copy 1 + storew %.541, %.540 + %.542 =l add %.250, 464 + %.543 =w copy 658990580 + storew %.543, %.542 + %.544 =l add %.250, 468 + %.545 =l extsw 0 + %.546 =l sub %.545, 2 + %.547 =w copy %.546 + storeh %.547, %.544 + %.548 =l add %.250, 470 + storeh 0, %.548 + %.549 =l add %.250, 472 + %.550 =w copy 376143518 + storew %.550, %.549 + %.551 =l add %.250, 476 + %.552 =w copy 2827151306 + storew %.552, %.551 + %.553 =l add %.250, 480 + %.554 =w copy 1 + storew %.554, %.553 + %.555 =l add %.250, 484 + %.556 =w copy 0 + storew %.556, %.555 + %.557 =l add %.250, 488 + %.558 =w copy 1167 + 
storeh %.558, %.557 + %.559 =l add %.250, 490 + storeh 0, %.559 + %.560 =l add %.250, 492 + %.561 =w copy 1072189932 + storew %.561, %.560 + %.562 =l add %.250, 496 + %.563 =w copy 18446744073709551609 + storew %.563, %.562 + %.564 =l add %.250, 500 + %.565 =w copy 1 + storew %.565, %.564 + %.566 =l add %.250, 504 + %.567 =w copy 18446744073709551615 + storew %.567, %.566 + %.568 =l add %.250, 508 + %.569 =w copy 35279 + storeh %.569, %.568 + %.570 =l add %.250, 510 + storeh 0, %.570 + %.571 =l add %.250, 512 + %.572 =l extsw 0 + %.573 =l sub %.572, 5 + %.574 =w copy %.573 + storew %.574, %.571 + %.575 =l add %.250, 516 + %.576 =w copy 1022186559 + storew %.576, %.575 + %.577 =l add %.250, 520 + %.578 =w copy 0 + storew %.578, %.577 + %.579 =l add %.250, 524 + %.580 =w copy 3828594409 + storew %.580, %.579 + %.581 =l add %.250, 528 + %.582 =w copy 23810 + storeh %.582, %.581 + %.583 =l add %.250, 530 + storeh 0, %.583 + %.584 =l add %.250, 532 + %.585 =w copy 2063202579 + storew %.585, %.584 + %.586 =l add %.250, 536 + %.587 =w copy 1748107750 + storew %.587, %.586 + %.588 =l add %.250, 540 + %.589 =w copy 7 + storew %.589, %.588 + %.590 =l add %.250, 544 + %.591 =w copy 1 + storew %.591, %.590 + %.592 =l add %.250, 548 + %.593 =w copy 56340 + storeh %.593, %.592 + %.594 =l add %.250, 550 + storeh 0, %.594 + %.595 =l add %.250, 552 + %.596 =w copy 3253414294 + storew %.596, %.595 + %.597 =l add %.250, 556 + %.598 =w copy 3590563017 + storew %.598, %.597 + %.599 =l add %.250, 560 + %.600 =l extsw 0 + %.601 =l sub %.600, 8 + %.602 =w copy %.601 + storew %.602, %.599 + %.603 =l add %.250, 564 + %.604 =w copy 239898201 + storew %.604, %.603 + %.605 =l add %.250, 568 + %.606 =w copy 15795 + storeh %.606, %.605 + %.607 =l add %.250, 570 + storeh 0, %.607 + %.608 =l add %.250, 572 + %.609 =w copy 0 + storew %.609, %.608 + %.610 =l add %.250, 576 + %.611 =w copy 1 + storew %.611, %.610 + %.612 =l add %.250, 580 + %.613 =w copy 1 + storew %.613, %.612 + %.614 =l add %.250, 
584 + %.615 =w copy 0 + storew %.615, %.614 + %.616 =l add %.250, 588 + %.617 =w copy 1167 + storeh %.617, %.616 + %.618 =l add %.250, 590 + storeh 0, %.618 + %.619 =l add %.250, 592 + %.620 =w copy 1072189932 + storew %.620, %.619 + %.621 =l add %.250, 596 + %.622 =w copy 18446744073709551609 + storew %.622, %.621 + %.623 =l add %.250, 600 + %.624 =w copy 7 + storew %.624, %.623 + %.625 =l add %.250, 604 + %.626 =w copy 1 + storew %.626, %.625 + %.627 =l add %.250, 608 + %.628 =w copy 56340 + storeh %.628, %.627 + %.629 =l add %.250, 610 + storeh 0, %.629 + %.630 =l add %.250, 612 + %.631 =w copy 3253414294 + storew %.631, %.630 + %.632 =l add %.250, 616 + %.633 =w copy 3590563017 + storew %.633, %.632 + %.634 =l add %.250, 620 + %.635 =l extsw 0 + %.636 =l sub %.635, 8 + %.637 =w copy %.636 + storew %.637, %.634 + %.638 =l add %.250, 624 + %.639 =w copy 239898201 + storew %.639, %.638 + %.640 =l add %.250, 628 + %.641 =w copy 15795 + storeh %.641, %.640 + %.642 =l add %.250, 630 + storeh 0, %.642 + %.643 =l add %.250, 632 + %.644 =w copy 0 + storew %.644, %.643 + %.645 =l add %.250, 636 + %.646 =w copy 1 + storew %.646, %.645 + %.647 =l add %.250, 640 + %.648 =w copy 1 + storew %.648, %.647 + %.649 =l add %.250, 644 + %.650 =w copy 0 + storew %.650, %.649 + %.651 =l add %.250, 648 + %.652 =w copy 1167 + storeh %.652, %.651 + %.653 =l add %.250, 650 + storeh 0, %.653 + %.654 =l add %.250, 652 + %.655 =w copy 1072189932 + storew %.655, %.654 + %.656 =l add %.250, 656 + %.657 =w copy 18446744073709551609 + storew %.657, %.656 + %.658 =l add %.250, 660 + %.659 =w copy 804387281 + storew %.659, %.658 + %.660 =l add %.250, 664 + %.661 =w copy 2402775829 + storew %.661, %.660 + %.662 =l add %.250, 668 + %.663 =w copy 1 + storeh %.663, %.662 + %.664 =l add %.250, 670 + storeh 0, %.664 + %.665 =l add %.250, 672 + %.666 =w copy 330816246 + storew %.666, %.665 + %.667 =l add %.250, 676 + %.668 =w copy 3830945193 + storew %.668, %.667 + %.669 =l add %.250, 680 + %.670 =w 
copy 2436229418 + storew %.670, %.669 + %.671 =l add %.250, 684 + %.672 =w copy 2052428021 + storew %.672, %.671 + %.673 =l add %.250, 688 + %.674 =w copy 39355 + storeh %.674, %.673 + %.675 =l add %.250, 690 + storeh 0, %.675 + %.676 =l add %.250, 692 + %.677 =w copy 410469209 + storew %.677, %.676 + %.678 =l add %.250, 696 + %.679 =w copy 1 + storew %.679, %.678 + %.680 =l add %.250, 700 + %.681 =w copy 2243791941 + storew %.681, %.680 + %.682 =l add %.250, 704 + %.683 =w copy 1 + storew %.683, %.682 + %.684 =l add %.250, 708 + %.685 =w copy 23672 + storeh %.685, %.684 + %.686 =l add %.250, 710 + storeh 0, %.686 + %.687 =l add %.250, 712 + %.688 =w copy 3298880888 + storew %.688, %.687 + %.689 =l add %.250, 716 + %.690 =w copy 6 + storew %.690, %.689 + %.691 =l add %.250, 720 + %.692 =w copy 1963360965 + storew %.692, %.691 + %.693 =l add %.250, 724 + %.694 =w copy 3550624554 + storew %.694, %.693 + %.695 =l add %.250, 728 + %.696 =l extsw 0 + %.697 =l sub %.696, 1 + %.698 =w copy %.697 + storeh %.698, %.695 + %.699 =l add %.250, 730 + storeh 0, %.699 + %.700 =l add %.250, 732 + %.701 =w copy 733588941 + storew %.701, %.700 + %.702 =l add %.250, 736 + %.703 =w copy 1 + storew %.703, %.702 + %.704 =l add %.250, 740 + %.705 =w copy 2356246768 + storew %.705, %.704 + %.706 =l add %.250, 744 + %.707 =w copy 18446744073709551611 + storew %.707, %.706 + %.708 =l add %.250, 748 + %.709 =w copy 40204 + storeh %.709, %.708 + %.710 =l add %.250, 750 + storeh 0, %.710 + %.711 =l add %.250, 752 + %.712 =w copy 3056896668 + storew %.712, %.711 + %.713 =l add %.250, 756 + %.714 =w copy 7 + storew %.714, %.713 + %.715 =l add %.250, 760 + %.716 =w copy 669812072 + storew %.716, %.715 + %.717 =l add %.250, 764 + %.718 =w copy 891221781 + storew %.718, %.717 + %.719 =l add %.250, 768 + %.720 =w copy 37985 + storeh %.720, %.719 + %.721 =l add %.250, 770 + storeh 0, %.721 + %.722 =l add %.250, 772 + %.723 =w copy 8 + storew %.723, %.722 + %.724 =l add %.250, 776 + %.725 =w copy 
2421504469 + storew %.725, %.724 + %.726 =l add %.250, 780 + %.727 =w copy 1575629687 + storew %.727, %.726 + %.728 =l add %.250, 784 + %.729 =w copy 18446744073709551615 + storew %.729, %.728 + %.730 =l add %.250, 788 + %.731 =w copy 1 + storeh %.731, %.730 + %.732 =l add %.250, 790 + storeh 0, %.732 + %.733 =l add %.250, 792 + %.734 =w copy 1989414205 + storew %.734, %.733 + %.735 =l add %.250, 796 + %.736 =w copy 124094497 + storew %.736, %.735 + %.737 =l add %.250, 800 + %.738 =w copy 202050518 + storew %.738, %.737 + %.739 =l add %.250, 804 + %.740 =w copy 0 + storew %.740, %.739 + %.741 =l add %.250, 808 + %.742 =w copy 6474 + storeh %.742, %.741 + %.743 =l add %.250, 810 + storeh 0, %.743 + %.744 =l add %.250, 812 + %.745 =l extsw 0 + %.746 =l sub %.745, 1 + %.747 =w copy %.746 + storew %.747, %.744 + %.748 =l add %.250, 816 + %.749 =w copy 0 + storew %.749, %.748 + %.750 =l add %.250, 820 + %.751 =w copy 9 + storew %.751, %.750 + %.752 =l add %.250, 824 + %.753 =w copy 2313779975 + storew %.753, %.752 + %.754 =l add %.250, 828 + %.755 =w copy 26682 + storeh %.755, %.754 + %.756 =l add %.250, 830 + storeh 0, %.756 + %.757 =l add %.250, 832 + %.758 =w copy 0 + storew %.758, %.757 + %.759 =l add %.250, 836 + %.760 =w copy 18446744073709551612 + storew %.760, %.759 + %.761 =l add %.250, 840 + %.762 =w copy 202050518 + storew %.762, %.761 + %.763 =l add %.250, 844 + %.764 =w copy 0 + storew %.764, %.763 + %.765 =l add %.250, 848 + %.766 =w copy 6474 + storeh %.766, %.765 + %.767 =l add %.250, 850 + storeh 0, %.767 + %.768 =l add %.250, 852 + %.769 =l extsw 0 + %.770 =l sub %.769, 1 + %.771 =w copy %.770 + storew %.771, %.768 + %.772 =l add %.250, 856 + %.773 =w copy 0 + storew %.773, %.772 + %.774 =l add %.250, 860 + %.775 =w copy 1 + storew %.775, %.774 + %.776 =l add %.250, 864 + %.777 =w copy 1769489573 + storew %.777, %.776 + %.778 =l add %.250, 868 + %.779 =w copy 57523 + storeh %.779, %.778 + %.780 =l add %.250, 870 + storeh 0, %.780 + %.781 =l add %.250, 
872 + %.782 =l extsw 0 + %.783 =l sub %.782, 1 + %.784 =w copy %.783 + storew %.784, %.781 + %.785 =l add %.250, 876 + %.786 =w copy 18446744073709551615 + storew %.786, %.785 + %.787 =l add %.250, 880 + %.788 =w copy 1575629687 + storew %.788, %.787 + %.789 =l add %.250, 884 + %.790 =w copy 18446744073709551615 + storew %.790, %.789 + %.791 =l add %.250, 888 + %.792 =w copy 1 + storeh %.792, %.791 + %.793 =l add %.250, 890 + storeh 0, %.793 + %.794 =l add %.250, 892 + %.795 =w copy 1989414205 + storew %.795, %.794 + %.796 =l add %.250, 896 + %.797 =w copy 124094497 + storew %.797, %.796 + %.798 =l add %.250, 900 + %.799 =w copy 50251224 + storew %.799, %.798 + %.800 =l add %.250, 904 + %.801 =w copy 1 + storew %.801, %.800 + %.802 =l add %.250, 908 + %.803 =w copy 0 + storeh %.803, %.802 + %.804 =l add %.250, 910 + storeh 0, %.804 + %.805 =l add %.250, 912 + %.806 =l extsw 0 + %.807 =l sub %.806, 1 + %.808 =w copy %.807 + storew %.808, %.805 + %.809 =l add %.250, 916 + %.810 =w copy 3083430017 + storew %.810, %.809 + %.811 =l add %.250, 920 + %.812 =l extsw 0 + %.813 =l sub %.812, 1 + %.814 =w copy %.813 + storew %.814, %.811 + %.815 =l add %.250, 924 + %.816 =w copy 2 + storew %.816, %.815 + %.817 =l add %.250, 928 + %.818 =w copy 1 + storeh %.818, %.817 + %.819 =l add %.250, 930 + storeh 0, %.819 + %.820 =l add %.250, 932 + %.821 =w copy 2194741943 + storew %.821, %.820 + %.822 =l add %.250, 936 + %.823 =w copy 18446744073709551608 + storew %.823, %.822 + %.824 =l add %.250, 940 + %.825 =w copy 1 + storew %.825, %.824 + %.826 =l add %.250, 944 + %.827 =w copy 1 + storew %.827, %.826 + %.828 =l add %.250, 948 + %.829 =w copy 21621 + storeh %.829, %.828 + %.830 =l add %.250, 950 + storeh 0, %.830 + %.831 =l add %.250, 952 + %.832 =w copy 855572299 + storew %.832, %.831 + %.833 =l add %.250, 956 + %.834 =w copy 1 + storew %.834, %.833 + %.835 =l add %.250, 960 + %.836 =w copy 321451902 + storew %.836, %.835 + %.837 =l add %.250, 964 + %.838 =w copy 221008639 + 
storew %.838, %.837 + %.839 =l add %.250, 968 + %.840 =w copy 31068 + storeh %.840, %.839 + %.841 =l add %.250, 970 + storeh 0, %.841 + %.842 =l add %.250, 972 + %.843 =l extsw 0 + %.844 =l sub %.843, 1 + %.845 =w copy %.844 + storew %.845, %.842 + %.846 =l add %.250, 976 + %.847 =w copy 18446744073709551615 + storew %.847, %.846 + %.848 =l add %.250, 980 + %.849 =w copy 2436229418 + storew %.849, %.848 + %.850 =l add %.250, 984 + %.851 =w copy 2052428021 + storew %.851, %.850 + %.852 =l add %.250, 988 + %.853 =w copy 39355 + storeh %.853, %.852 + %.854 =l add %.250, 990 + storeh 0, %.854 + %.855 =l add %.250, 992 + %.856 =w copy 410469209 + storew %.856, %.855 + %.857 =l add %.250, 996 + %.858 =w copy 1 + storew %.858, %.857 + %.859 =l add %.250, 1000 + %.860 =w copy 1 + storew %.860, %.859 + %.861 =l add %.250, 1004 + %.862 =w copy 2536799018 + storew %.862, %.861 + %.863 =l add %.250, 1008 + %.864 =w copy 8956 + storeh %.864, %.863 + %.865 =l add %.250, 1010 + storeh 0, %.865 + %.866 =l add %.250, 1012 + %.867 =w copy 806078605 + storew %.867, %.866 + %.868 =l add %.250, 1016 + %.869 =w copy 2894914844 + storew %.869, %.868 + %.870 =l add %.250, 1020 + %.871 =w copy 0 + storew %.871, %.870 + %.872 =l add %.250, 1024 + %.873 =w copy 3828594409 + storew %.873, %.872 + %.874 =l add %.250, 1028 + %.875 =w copy 23810 + storeh %.875, %.874 + %.876 =l add %.250, 1030 + storeh 0, %.876 + %.877 =l add %.250, 1032 + %.878 =w copy 2063202579 + storew %.878, %.877 + %.879 =l add %.250, 1036 + %.880 =w copy 1748107750 + storew %.880, %.879 + %.881 =l add %.250, 1040 + %.882 =w copy 146340782 + storew %.882, %.881 + %.883 =l add %.250, 1044 + %.884 =w copy 2063185036 + storew %.884, %.883 + %.885 =l add %.250, 1048 + %.886 =l extsw 0 + %.887 =l sub %.886, 8 + %.888 =w copy %.887 + storeh %.888, %.885 + %.889 =l add %.250, 1050 + storeh 0, %.889 + %.890 =l add %.250, 1052 + %.891 =l extsw 0 + %.892 =l sub %.891, 8 + %.893 =w copy %.892 + storew %.893, %.890 + %.894 =l add 
%.250, 1056 + %.895 =w copy 3813695288 + storew %.895, %.894 + %.896 =l add %.250, 1060 + %.897 =w copy 3448018338 + storew %.897, %.896 + %.898 =l add %.250, 1064 + %.899 =w copy 1 + storew %.899, %.898 + %.900 =l add %.250, 1068 + %.901 =w copy 8346 + storeh %.901, %.900 + %.902 =l add %.250, 1070 + storeh 0, %.902 + %.903 =l add %.250, 1072 + %.904 =w copy 2430042709 + storew %.904, %.903 + %.905 =l add %.250, 1076 + %.906 =w copy 362575220 + storew %.906, %.905 + %.907 =l add %.250, 1080 + %.908 =w copy 1 + storew %.908, %.907 + %.909 =l add %.250, 1084 + %.910 =w copy 0 + storew %.910, %.909 + %.911 =l add %.250, 1088 + %.912 =w copy 1167 + storeh %.912, %.911 + %.913 =l add %.250, 1090 + storeh 0, %.913 + %.914 =l add %.250, 1092 + %.915 =w copy 1072189932 + storew %.915, %.914 + %.916 =l add %.250, 1096 + %.917 =w copy 18446744073709551609 + storew %.917, %.916 + %.918 =l add %.250, 1100 + %.919 =w copy 1 + storew %.919, %.918 + %.920 =l add %.250, 1104 + %.921 =w copy 1 + storew %.921, %.920 + %.922 =l add %.250, 1108 + %.923 =w copy 21621 + storeh %.923, %.922 + %.924 =l add %.250, 1110 + storeh 0, %.924 + %.925 =l add %.250, 1112 + %.926 =w copy 855572299 + storew %.926, %.925 + %.927 =l add %.250, 1116 + %.928 =w copy 1 + storew %.928, %.927 + %.929 =l add %.250, 1120 + %.930 =w copy 0 + storew %.930, %.929 + %.931 =l add %.250, 1124 + %.932 =w copy 1 + storew %.932, %.931 + %.933 =l add %.250, 1128 + %.934 =w copy 25431 + storeh %.934, %.933 + %.935 =l add %.250, 1130 + storeh 0, %.935 + %.936 =l add %.250, 1132 + %.937 =w copy 3588134414 + storew %.937, %.936 + %.938 =l add %.250, 1136 + %.939 =w copy 8 + storew %.939, %.938 + %.940 =l add %.250, 1140 + %.941 =w copy 1 + storew %.941, %.940 + %.942 =l add %.250, 1144 + %.943 =w copy 18446744073709551615 + storew %.943, %.942 + %.944 =l add %.250, 1148 + %.945 =w copy 1 + storeh %.945, %.944 + %.946 =l add %.250, 1150 + storeh 0, %.946 + %.947 =l add %.250, 1152 + %.948 =w copy 4158742492 + storew 
%.948, %.947 + %.949 =l add %.250, 1156 + %.950 =w copy 18446744073709551615 + storew %.950, %.949 + %.951 =l add %.250, 1160 + %.952 =w copy 0 + storew %.952, %.951 + %.953 =l add %.250, 1164 + %.954 =w copy 18446744073709551612 + storew %.954, %.953 + %.955 =l add %.250, 1168 + %.956 =l extsw 0 + %.957 =l sub %.956, 1 + %.958 =w copy %.957 + storeh %.958, %.955 + %.959 =l add %.250, 1170 + storeh 0, %.959 + %.960 =l add %.250, 1172 + %.961 =w copy 31118189 + storew %.961, %.960 + %.962 =l add %.250, 1176 + %.963 =w copy 2 + storew %.963, %.962 + %.964 =l add %.250, 1180 + %.965 =w copy 1 + storew %.965, %.964 + %.966 =l add %.250, 1184 + %.967 =w copy 18446744073709551615 + storew %.967, %.966 + %.968 =l add %.250, 1188 + %.969 =w copy 1 + storeh %.969, %.968 + %.970 =l add %.250, 1190 + storeh 0, %.970 + %.971 =l add %.250, 1192 + %.972 =w copy 4158742492 + storew %.972, %.971 + %.973 =l add %.250, 1196 + %.974 =w copy 18446744073709551615 + storew %.974, %.973 + %.975 =l add %.250, 1200 + %.976 =w copy 3 + storew %.976, %.975 + %.977 =l add %.250, 1204 + %.978 =w copy 18446744073709551613 + storew %.978, %.977 + %.979 =l add %.250, 1208 + %.980 =l extsw 0 + %.981 =l sub %.980, 1 + %.982 =w copy %.981 + storeh %.982, %.979 + %.983 =l add %.250, 1210 + storeh 0, %.983 + %.984 =l add %.250, 1212 + %.985 =w copy 3074106023 + storew %.985, %.984 + %.986 =l add %.250, 1216 + %.987 =w copy 0 + storew %.987, %.986 + %.988 =l add %.250, 1220 + %.989 =w copy 1103311892 + storew %.989, %.988 + %.990 =l add %.250, 1224 + %.991 =w copy 1 + storew %.991, %.990 + %.992 =l add %.250, 1228 + %.993 =w copy 1 + storeh %.993, %.992 + %.994 =l add %.250, 1230 + storeh 0, %.994 + %.995 =l add %.250, 1232 + %.996 =w copy 704967764 + storew %.996, %.995 + %.997 =l add %.250, 1236 + %.998 =w copy 7 + storew %.998, %.997 + %.999 =l add %.250, 1240 + %.1000 =w copy 3 + storew %.1000, %.999 + %.1001 =l add %.250, 1244 + %.1002 =w copy 18446744073709551613 + storew %.1002, %.1001 + %.1003 
=l add %.250, 1248 + %.1004 =l extsw 0 + %.1005 =l sub %.1004, 1 + %.1006 =w copy %.1005 + storeh %.1006, %.1003 + %.1007 =l add %.250, 1250 + storeh 0, %.1007 + %.1008 =l add %.250, 1252 + %.1009 =w copy 3074106023 + storew %.1009, %.1008 + %.1010 =l add %.250, 1256 + %.1011 =w copy 0 + storew %.1011, %.1010 + %.1012 =l add %.250, 1260 + %.1013 =w copy 2844539373 + storew %.1013, %.1012 + %.1014 =l add %.250, 1264 + %.1015 =w copy 3196485425 + storew %.1015, %.1014 + %.1016 =l add %.250, 1268 + %.1017 =l extsw 0 + %.1018 =l sub %.1017, 1 + %.1019 =w copy %.1018 + storeh %.1019, %.1016 + %.1020 =l add %.250, 1270 + storeh 0, %.1020 + %.1021 =l add %.250, 1272 + %.1022 =w copy 0 + storew %.1022, %.1021 + %.1023 =l add %.250, 1276 + %.1024 =w copy 754300143 + storew %.1024, %.1023 + %.1025 =l add %.250, 1280 + %.1026 =w copy 804387281 + storew %.1026, %.1025 + %.1027 =l add %.250, 1284 + %.1028 =w copy 2402775829 + storew %.1028, %.1027 + %.1029 =l add %.250, 1288 + %.1030 =w copy 1 + storeh %.1030, %.1029 + %.1031 =l add %.250, 1290 + storeh 0, %.1031 + %.1032 =l add %.250, 1292 + %.1033 =w copy 330816246 + storew %.1033, %.1032 + %.1034 =l add %.250, 1296 + %.1035 =w copy 3830945193 + storew %.1035, %.1034 + %.1036 =l add %.250, 1300 + %.1037 =w copy 2243791941 + storew %.1037, %.1036 + %.1038 =l add %.250, 1304 + %.1039 =w copy 1 + storew %.1039, %.1038 + %.1040 =l add %.250, 1308 + %.1041 =w copy 23672 + storeh %.1041, %.1040 + %.1042 =l add %.250, 1310 + storeh 0, %.1042 + %.1043 =l add %.250, 1312 + %.1044 =w copy 3298880888 + storew %.1044, %.1043 + %.1045 =l add %.250, 1316 + %.1046 =w copy 6 + storew %.1046, %.1045 + %.1047 =l add %.250, 1320 + %.1048 =w copy 2356246768 + storew %.1048, %.1047 + %.1049 =l add %.250, 1324 + %.1050 =w copy 18446744073709551611 + storew %.1050, %.1049 + %.1051 =l add %.250, 1328 + %.1052 =w copy 40204 + storeh %.1052, %.1051 + %.1053 =l add %.250, 1330 + storeh 0, %.1053 + %.1054 =l add %.250, 1332 + %.1055 =w copy 3056896668 + 
storew %.1055, %.1054 + %.1056 =l add %.250, 1336 + %.1057 =w copy 7 + storew %.1057, %.1056 + %.1058 =l add %.250, 1340 + %.1059 =w copy 3448018338 + storew %.1059, %.1058 + %.1060 =l add %.250, 1344 + %.1061 =w copy 1 + storew %.1061, %.1060 + %.1062 =l add %.250, 1348 + %.1063 =w copy 8346 + storeh %.1063, %.1062 + %.1064 =l add %.250, 1350 + storeh 0, %.1064 + %.1065 =l add %.250, 1352 + %.1066 =w copy 2430042709 + storew %.1066, %.1065 + %.1067 =l add %.250, 1356 + %.1068 =w copy 362575220 + storew %.1068, %.1067 + %.1069 =l add %.250, 1360 + %.1070 =l extsw 0 + %.1071 =l sub %.1070, 1 + %.1072 =w copy %.1071 + storew %.1072, %.1069 + %.1073 =l add %.250, 1364 + %.1074 =w copy 805176143 + storew %.1074, %.1073 + %.1075 =l add %.250, 1368 + %.1076 =w copy 9977 + storeh %.1076, %.1075 + %.1077 =l add %.250, 1370 + storeh 0, %.1077 + %.1078 =l add %.250, 1372 + %.1079 =w copy 714761159 + storew %.1079, %.1078 + %.1080 =l add %.250, 1376 + %.1081 =w copy 4 + storew %.1081, %.1080 + %.1082 =l add %.250, 1380 + %.1083 =l extsw 0 + %.1084 =l sub %.1083, 1 + %.1085 =w copy %.1084 + storew %.1085, %.1082 + %.1086 =l add %.250, 1384 + %.1087 =w copy 70130414 + storew %.1087, %.1086 + %.1088 =l add %.250, 1388 + %.1089 =w copy 4963 + storeh %.1089, %.1088 + %.1090 =l add %.250, 1390 + storeh 0, %.1090 + %.1091 =l add %.250, 1392 + %.1092 =l extsw 0 + %.1093 =l sub %.1092, 1 + %.1094 =w copy %.1093 + storew %.1094, %.1091 + %.1095 =l add %.250, 1396 + %.1096 =w copy 1686473211 + storew %.1096, %.1095 + %.1097 =l add %.250, 1400 + %.1098 =w copy 669812072 + storew %.1098, %.1097 + %.1099 =l add %.250, 1404 + %.1100 =w copy 891221781 + storew %.1100, %.1099 + %.1101 =l add %.250, 1408 + %.1102 =w copy 37985 + storeh %.1102, %.1101 + %.1103 =l add %.250, 1410 + storeh 0, %.1103 + %.1104 =l add %.250, 1412 + %.1105 =w copy 8 + storew %.1105, %.1104 + %.1106 =l add %.250, 1416 + %.1107 =w copy 2421504469 + storew %.1107, %.1106 + %.1108 =l add %.250, 1420 + %.1109 =w copy 1 + 
storew %.1109, %.1108 + %.1110 =l add %.250, 1424 + %.1111 =w copy 2842899806 + storew %.1111, %.1110 + %.1112 =l add %.250, 1428 + %.1113 =l extsw 0 + %.1114 =l sub %.1113, 5 + %.1115 =w copy %.1114 + storeh %.1115, %.1112 + %.1116 =l add %.250, 1430 + storeh 0, %.1116 + %.1117 =l add %.250, 1432 + %.1118 =l extsw 0 + %.1119 =l sub %.1118, 7 + %.1120 =w copy %.1119 + storew %.1120, %.1117 + %.1121 =l add %.250, 1436 + %.1122 =w copy 3779923269 + storew %.1122, %.1121 + %.1123 =l add %.250, 1440 + %.1124 =w copy 2436229418 + storew %.1124, %.1123 + %.1125 =l add %.250, 1444 + %.1126 =w copy 2052428021 + storew %.1126, %.1125 + %.1127 =l add %.250, 1448 + %.1128 =w copy 39355 + storeh %.1128, %.1127 + %.1129 =l add %.250, 1450 + storeh 0, %.1129 + %.1130 =l add %.250, 1452 + %.1131 =w copy 410469209 + storew %.1131, %.1130 + %.1132 =l add %.250, 1456 + %.1133 =w copy 1 + storew %.1133, %.1132 + %.1134 =l add %.250, 1460 + %.1135 =w copy 732249490 + storew %.1135, %.1134 + %.1136 =l add %.250, 1464 + %.1137 =w copy 18446744073709551609 + storew %.1137, %.1136 + %.1138 =l add %.250, 1468 + %.1139 =w copy 32232 + storeh %.1139, %.1138 + %.1140 =l add %.250, 1470 + storeh 0, %.1140 + %.1141 =l add %.250, 1472 + %.1142 =w copy 0 + storew %.1142, %.1141 + %.1143 =l add %.250, 1476 + %.1144 =w copy 1338704947 + storew %.1144, %.1143 + %.1145 =l add %.250, 1480 + %.1146 =w copy 1 + storew %.1146, %.1145 + %.1147 =l add %.250, 1484 + %.1148 =w copy 790890217 + storew %.1148, %.1147 + %.1149 =l add %.250, 1488 + %.1150 =w copy 7776 + storeh %.1150, %.1149 + %.1151 =l add %.250, 1490 + storeh 0, %.1151 + %.1152 =l add %.250, 1492 + %.1153 =w copy 1603143842 + storew %.1153, %.1152 + %.1154 =l add %.250, 1496 + %.1155 =w copy 1259960115 + storew %.1155, %.1154 + %.1156 =l add %.250, 1500 + %.1157 =l extsw 0 + %.1158 =l sub %.1157, 1 + %.1159 =w copy %.1158 + storew %.1159, %.1156 + %.1160 =l add %.250, 1504 + %.1161 =w copy 70130414 + storew %.1161, %.1160 + %.1162 =l add 
%.250, 1508 + %.1163 =w copy 4963 + storeh %.1163, %.1162 + %.1164 =l add %.250, 1510 + storeh 0, %.1164 + %.1165 =l add %.250, 1512 + %.1166 =l extsw 0 + %.1167 =l sub %.1166, 1 + %.1168 =w copy %.1167 + storew %.1168, %.1165 + %.1169 =l add %.250, 1516 + %.1170 =w copy 1686473211 + storew %.1170, %.1169 + %.1171 =l add %.250, 1520 + %.1172 =l extsw 0 + %.1173 =l sub %.1172, 1 + %.1174 =w copy %.1173 + storew %.1174, %.1171 + %.1175 =l add %.250, 1524 + %.1176 =w copy 2 + storew %.1176, %.1175 + %.1177 =l add %.250, 1528 + %.1178 =w copy 1 + storeh %.1178, %.1177 + %.1179 =l add %.250, 1530 + storeh 0, %.1179 + %.1180 =l add %.250, 1532 + %.1181 =w copy 2194741943 + storew %.1181, %.1180 + %.1182 =l add %.250, 1536 + %.1183 =w copy 18446744073709551608 + storew %.1183, %.1182 + %.1184 =l add %.250, 1540 + %.1185 =w copy 2844539373 + storew %.1185, %.1184 + %.1186 =l add %.250, 1544 + %.1187 =w copy 3196485425 + storew %.1187, %.1186 + %.1188 =l add %.250, 1548 + %.1189 =l extsw 0 + %.1190 =l sub %.1189, 1 + %.1191 =w copy %.1190 + storeh %.1191, %.1188 + %.1192 =l add %.250, 1550 + storeh 0, %.1192 + %.1193 =l add %.250, 1552 + %.1194 =w copy 0 + storew %.1194, %.1193 + %.1195 =l add %.250, 1556 + %.1196 =w copy 754300143 + storew %.1196, %.1195 + %.1197 =l add %.250, 1560 + %.1198 =w copy 2356246768 + storew %.1198, %.1197 + %.1199 =l add %.250, 1564 + %.1200 =w copy 18446744073709551611 + storew %.1200, %.1199 + %.1201 =l add %.250, 1568 + %.1202 =w copy 40204 + storeh %.1202, %.1201 + %.1203 =l add %.250, 1570 + storeh 0, %.1203 + %.1204 =l add %.250, 1572 + %.1205 =w copy 3056896668 + storew %.1205, %.1204 + %.1206 =l add %.250, 1576 + %.1207 =w copy 7 + storew %.1207, %.1206 + %.1208 =l add %.250, 1580 + %.1209 =w copy 0 + storew %.1209, %.1208 + %.1210 =l add %.250, 1584 + %.1211 =w copy 3828594409 + storew %.1211, %.1210 + %.1212 =l add %.250, 1588 + %.1213 =w copy 23810 + storeh %.1213, %.1212 + %.1214 =l add %.250, 1590 + storeh 0, %.1214 + %.1215 =l add 
%.250, 1592 + %.1216 =w copy 2063202579 + storew %.1216, %.1215 + %.1217 =l add %.250, 1596 + %.1218 =w copy 1748107750 + storew %.1218, %.1217 + %.1219 =l add %.250, 1600 + %.1220 =l extsw 0 + %.1221 =l sub %.1220, 1 + %.1222 =w copy %.1221 + storew %.1222, %.1219 + %.1223 =l add %.250, 1604 + %.1224 =w copy 2953570971 + storew %.1224, %.1223 + %.1225 =l add %.250, 1608 + %.1226 =w copy 55280 + storeh %.1226, %.1225 + %.1227 =l add %.250, 1610 + storeh 0, %.1227 + %.1228 =l add %.250, 1612 + %.1229 =w copy 890946016 + storew %.1229, %.1228 + %.1230 =l add %.250, 1616 + %.1231 =w copy 1 + storew %.1231, %.1230 + %.1232 =l add %.250, 1620 + %.1233 =w copy 2844539373 + storew %.1233, %.1232 + %.1234 =l add %.250, 1624 + %.1235 =w copy 3196485425 + storew %.1235, %.1234 + %.1236 =l add %.250, 1628 + %.1237 =l extsw 0 + %.1238 =l sub %.1237, 1 + %.1239 =w copy %.1238 + storeh %.1239, %.1236 + %.1240 =l add %.250, 1630 + storeh 0, %.1240 + %.1241 =l add %.250, 1632 + %.1242 =w copy 0 + storew %.1242, %.1241 + %.1243 =l add %.250, 1636 + %.1244 =w copy 754300143 + storew %.1244, %.1243 + %.1245 =l add %.250, 1640 + %.1246 =w copy 1963360965 + storew %.1246, %.1245 + %.1247 =l add %.250, 1644 + %.1248 =w copy 3550624554 + storew %.1248, %.1247 + %.1249 =l add %.250, 1648 + %.1250 =l extsw 0 + %.1251 =l sub %.1250, 1 + %.1252 =w copy %.1251 + storeh %.1252, %.1249 + %.1253 =l add %.250, 1650 + storeh 0, %.1253 + %.1254 =l add %.250, 1652 + %.1255 =w copy 733588941 + storew %.1255, %.1254 + %.1256 =l add %.250, 1656 + %.1257 =w copy 1 + storew %.1257, %.1256 + %.1258 =l add %.250, 1660 + %.1259 =w copy 321451902 + storew %.1259, %.1258 + %.1260 =l add %.250, 1664 + %.1261 =w copy 221008639 + storew %.1261, %.1260 + %.1262 =l add %.250, 1668 + %.1263 =w copy 31068 + storeh %.1263, %.1262 + %.1264 =l add %.250, 1670 + storeh 0, %.1264 + %.1265 =l add %.250, 1672 + %.1266 =l extsw 0 + %.1267 =l sub %.1266, 1 + %.1268 =w copy %.1267 + storew %.1268, %.1265 + %.1269 =l add 
%.250, 1676 + %.1270 =w copy 18446744073709551615 + storew %.1270, %.1269 + %.1271 =l add %.250, 1680 + %.1272 =w copy 3 + storew %.1272, %.1271 + %.1273 =l add %.250, 1684 + %.1274 =w copy 18446744073709551613 + storew %.1274, %.1273 + %.1275 =l add %.250, 1688 + %.1276 =l extsw 0 + %.1277 =l sub %.1276, 1 + %.1278 =w copy %.1277 + storeh %.1278, %.1275 + %.1279 =l add %.250, 1690 + storeh 0, %.1279 + %.1280 =l add %.250, 1692 + %.1281 =w copy 3074106023 + storew %.1281, %.1280 + %.1282 =l add %.250, 1696 + %.1283 =w copy 0 + storew %.1283, %.1282 + %.1284 =l add %.250, 1700 + %.1285 =w copy 0 + storew %.1285, %.1284 + %.1286 =l add %.250, 1704 + %.1287 =w copy 1 + storew %.1287, %.1286 + %.1288 =l add %.250, 1708 + %.1289 =w copy 25431 + storeh %.1289, %.1288 + %.1290 =l add %.250, 1710 + storeh 0, %.1290 + %.1291 =l add %.250, 1712 + %.1292 =w copy 3588134414 + storew %.1292, %.1291 + %.1293 =l add %.250, 1716 + %.1294 =w copy 8 + storew %.1294, %.1293 + %.1295 =l add %.250, 1720 + %.1296 =w copy 202050518 + storew %.1296, %.1295 + %.1297 =l add %.250, 1724 + %.1298 =w copy 0 + storew %.1298, %.1297 + %.1299 =l add %.250, 1728 + %.1300 =w copy 6474 + storeh %.1300, %.1299 + %.1301 =l add %.250, 1730 + storeh 0, %.1301 + %.1302 =l add %.250, 1732 + %.1303 =l extsw 0 + %.1304 =l sub %.1303, 1 + %.1305 =w copy %.1304 + storew %.1305, %.1302 + %.1306 =l add %.250, 1736 + %.1307 =w copy 0 + storew %.1307, %.1306 + %.1308 =l add %.250, 1740 + %.1309 =w copy 1 + storew %.1309, %.1308 + %.1310 =l add %.250, 1744 + %.1311 =w copy 18446744073709551615 + storew %.1311, %.1310 + %.1312 =l add %.250, 1748 + %.1313 =w copy 1 + storeh %.1313, %.1312 + %.1314 =l add %.250, 1750 + storeh 0, %.1314 + %.1315 =l add %.250, 1752 + %.1316 =w copy 4158742492 + storew %.1316, %.1315 + %.1317 =l add %.250, 1756 + %.1318 =w copy 18446744073709551615 + storew %.1318, %.1317 + %.1319 =l add %.250, 1760 + %.1320 =w copy 1 + storew %.1320, %.1319 + %.1321 =l add %.250, 1764 + %.1322 =w copy 
658990580 + storew %.1322, %.1321 + %.1323 =l add %.250, 1768 + %.1324 =l extsw 0 + %.1325 =l sub %.1324, 2 + %.1326 =w copy %.1325 + storeh %.1326, %.1323 + %.1327 =l add %.250, 1770 + storeh 0, %.1327 + %.1328 =l add %.250, 1772 + %.1329 =w copy 376143518 + storew %.1329, %.1328 + %.1330 =l add %.250, 1776 + %.1331 =w copy 2827151306 + storew %.1331, %.1330 + %.1332 =l add %.250, 1780 + %.1333 =w copy 1575629687 + storew %.1333, %.1332 + %.1334 =l add %.250, 1784 + %.1335 =w copy 18446744073709551615 + storew %.1335, %.1334 + %.1336 =l add %.250, 1788 + %.1337 =w copy 1 + storeh %.1337, %.1336 + %.1338 =l add %.250, 1790 + storeh 0, %.1338 + %.1339 =l add %.250, 1792 + %.1340 =w copy 1989414205 + storew %.1340, %.1339 + %.1341 =l add %.250, 1796 + %.1342 =w copy 124094497 + storew %.1342, %.1341 + %.1343 =l add %.250, 1800 + %.1344 =l extsw 0 + %.1345 =l sub %.1344, 1 + %.1346 =w copy %.1345 + storew %.1346, %.1343 + %.1347 =l add %.250, 1804 + %.1348 =w copy 70130414 + storew %.1348, %.1347 + %.1349 =l add %.250, 1808 + %.1350 =w copy 4963 + storeh %.1350, %.1349 + %.1351 =l add %.250, 1810 + storeh 0, %.1351 + %.1352 =l add %.250, 1812 + %.1353 =l extsw 0 + %.1354 =l sub %.1353, 1 + %.1355 =w copy %.1354 + storew %.1355, %.1352 + %.1356 =l add %.250, 1816 + %.1357 =w copy 1686473211 + storew %.1357, %.1356 + %.1358 =l add %.250, 1820 + %.1359 =w copy 321451902 + storew %.1359, %.1358 + %.1360 =l add %.250, 1824 + %.1361 =w copy 221008639 + storew %.1361, %.1360 + %.1362 =l add %.250, 1828 + %.1363 =w copy 31068 + storeh %.1363, %.1362 + %.1364 =l add %.250, 1830 + storeh 0, %.1364 + %.1365 =l add %.250, 1832 + %.1366 =l extsw 0 + %.1367 =l sub %.1366, 1 + %.1368 =w copy %.1367 + storew %.1368, %.1365 + %.1369 =l add %.250, 1836 + %.1370 =w copy 18446744073709551615 + storew %.1370, %.1369 + %.1371 =l add %.250, 1840 + %.1372 =w copy 1 + storew %.1372, %.1371 + %.1373 =l add %.250, 1844 + %.1374 =w copy 2536799018 + storew %.1374, %.1373 + %.1375 =l add %.250, 
1848 + %.1376 =w copy 8956 + storeh %.1376, %.1375 + %.1377 =l add %.250, 1850 + storeh 0, %.1377 + %.1378 =l add %.250, 1852 + %.1379 =w copy 806078605 + storew %.1379, %.1378 + %.1380 =l add %.250, 1856 + %.1381 =w copy 2894914844 + storew %.1381, %.1380 + %.1382 =l add %.250, 1860 + %.1383 =w copy 3 + storew %.1383, %.1382 + %.1384 =l add %.250, 1864 + %.1385 =w copy 0 + storew %.1385, %.1384 + %.1386 =l add %.250, 1868 + %.1387 =w copy 1 + storeh %.1387, %.1386 + %.1388 =l add %.250, 1870 + storeh 0, %.1388 + %.1389 =l add %.250, 1872 + %.1390 =w copy 3107308236 + storew %.1390, %.1389 + %.1391 =l add %.250, 1876 + %.1392 =w copy 1 + storew %.1392, %.1391 + %.1393 =l add %.250, 1880 + %.1394 =w copy 7 + storew %.1394, %.1393 + %.1395 =l add %.250, 1884 + %.1396 =w copy 1 + storew %.1396, %.1395 + %.1397 =l add %.250, 1888 + %.1398 =w copy 56340 + storeh %.1398, %.1397 + %.1399 =l add %.250, 1890 + storeh 0, %.1399 + %.1400 =l add %.250, 1892 + %.1401 =w copy 3253414294 + storew %.1401, %.1400 + %.1402 =l add %.250, 1896 + %.1403 =w copy 3590563017 + storew %.1403, %.1402 + %.1404 =l add %.250, 1900 + %.1405 =l extsw 0 + %.1406 =l sub %.1405, 1 + %.1407 =w copy %.1406 + storew %.1407, %.1404 + %.1408 =l add %.250, 1904 + %.1409 =w copy 70130414 + storew %.1409, %.1408 + %.1410 =l add %.250, 1908 + %.1411 =w copy 4963 + storeh %.1411, %.1410 + %.1412 =l add %.250, 1910 + storeh 0, %.1412 + %.1413 =l add %.250, 1912 + %.1414 =l extsw 0 + %.1415 =l sub %.1414, 1 + %.1416 =w copy %.1415 + storew %.1416, %.1413 + %.1417 =l add %.250, 1916 + %.1418 =w copy 1686473211 + storew %.1418, %.1417 + %.1419 =l add %.250, 1920 + %.1420 =w copy 2436229418 + storew %.1420, %.1419 + %.1421 =l add %.250, 1924 + %.1422 =w copy 2052428021 + storew %.1422, %.1421 + %.1423 =l add %.250, 1928 + %.1424 =w copy 39355 + storeh %.1424, %.1423 + %.1425 =l add %.250, 1930 + storeh 0, %.1425 + %.1426 =l add %.250, 1932 + %.1427 =w copy 410469209 + storew %.1427, %.1426 + %.1428 =l add %.250, 
1936 + %.1429 =w copy 1 + storew %.1429, %.1428 + %.1430 =l add %.250, 1940 + %.1431 =w copy 0 + storew %.1431, %.1430 + %.1432 =l add %.250, 1944 + %.1433 =w copy 3828594409 + storew %.1433, %.1432 + %.1434 =l add %.250, 1948 + %.1435 =w copy 23810 + storeh %.1435, %.1434 + %.1436 =l add %.250, 1950 + storeh 0, %.1436 + %.1437 =l add %.250, 1952 + %.1438 =w copy 2063202579 + storew %.1438, %.1437 + %.1439 =l add %.250, 1956 + %.1440 =w copy 1748107750 + storew %.1440, %.1439 + %.1441 =l add %.250, 1960 + %.1442 =w copy 3 + storew %.1442, %.1441 + %.1443 =l add %.250, 1964 + %.1444 =w copy 18446744073709551613 + storew %.1444, %.1443 + %.1445 =l add %.250, 1968 + %.1446 =l extsw 0 + %.1447 =l sub %.1446, 1 + %.1448 =w copy %.1447 + storeh %.1448, %.1445 + %.1449 =l add %.250, 1970 + storeh 0, %.1449 + %.1450 =l add %.250, 1972 + %.1451 =w copy 3074106023 + storew %.1451, %.1450 + %.1452 =l add %.250, 1976 + %.1453 =w copy 0 + storew %.1453, %.1452 + %.1454 =l add %.250, 1980 + %.1455 =l extsw 0 + %.1456 =l sub %.1455, 1 + %.1457 =w copy %.1456 + storew %.1457, %.1454 + %.1458 =l add %.250, 1984 + %.1459 =w copy 2 + storew %.1459, %.1458 + %.1460 =l add %.250, 1988 + %.1461 =w copy 1 + storeh %.1461, %.1460 + %.1462 =l add %.250, 1990 + storeh 0, %.1462 + %.1463 =l add %.250, 1992 + %.1464 =w copy 2194741943 + storew %.1464, %.1463 + %.1465 =l add %.250, 1996 + %.1466 =w copy 18446744073709551608 + storew %.1466, %.1465 + %.1467 =l add %.250, 2000 + %.1468 =w copy 3902700085 + storew %.1468, %.1467 + %.1469 =l add %.250, 2004 + %.1470 =w copy 6 + storew %.1470, %.1469 + %.1471 =l add %.250, 2008 + %.1472 =l extsw 0 + %.1473 =l sub %.1472, 10 + %.1474 =w copy %.1473 + storeh %.1474, %.1471 + %.1475 =l add %.250, 2010 + storeh 0, %.1475 + %.1476 =l add %.250, 2012 + %.1477 =w copy 1449819268 + storew %.1477, %.1476 + %.1478 =l add %.250, 2016 + %.1479 =w copy 18446744073709551615 + storew %.1479, %.1478 + %.1480 =l add %.250, 2020 + %.1481 =l extsw 0 + %.1482 =l sub 
%.1481, 1 + %.1483 =w copy %.1482 + storew %.1483, %.1480 + %.1484 =l add %.250, 2024 + %.1485 =w copy 805176143 + storew %.1485, %.1484 + %.1486 =l add %.250, 2028 + %.1487 =w copy 9977 + storeh %.1487, %.1486 + %.1488 =l add %.250, 2030 + storeh 0, %.1488 + %.1489 =l add %.250, 2032 + %.1490 =w copy 714761159 + storew %.1490, %.1489 + %.1491 =l add %.250, 2036 + %.1492 =w copy 4 + storew %.1492, %.1491 + %.1493 =l add %.250, 2040 + %.1494 =w copy 1 + storew %.1494, %.1493 + %.1495 =l add %.250, 2044 + %.1496 =w copy 0 + storew %.1496, %.1495 + %.1497 =l add %.250, 2048 + %.1498 =w copy 1167 + storeh %.1498, %.1497 + %.1499 =l add %.250, 2050 + storeh 0, %.1499 + %.1500 =l add %.250, 2052 + %.1501 =w copy 1072189932 + storew %.1501, %.1500 + %.1502 =l add %.250, 2056 + %.1503 =w copy 18446744073709551609 + storew %.1503, %.1502 + %.1504 =l add %.250, 2060 + %.1505 =l extsw 0 + %.1506 =l sub %.1505, 1 + %.1507 =w copy %.1506 + storew %.1507, %.1504 + %.1508 =l add %.250, 2064 + %.1509 =w copy 306860618 + storew %.1509, %.1508 + %.1510 =l add %.250, 2068 + %.1511 =l extsw 0 + %.1512 =l sub %.1511, 2 + %.1513 =w copy %.1512 + storeh %.1513, %.1510 + %.1514 =l add %.250, 2070 + storeh 0, %.1514 + %.1515 =l add %.250, 2072 + %.1516 =w copy 256486627 + storew %.1516, %.1515 + %.1517 =l add %.250, 2076 + %.1518 =w copy 0 + storew %.1518, %.1517 + %.1519 =l add %.250, 2080 + %.1520 =l extsw 0 + %.1521 =l sub %.1520, 8 + %.1522 =w copy %.1521 + storew %.1522, %.1519 + %.1523 =l add %.250, 2084 + %.1524 =w copy 239898201 + storew %.1524, %.1523 + %.1525 =l add %.250, 2088 + %.1526 =w copy 15795 + storeh %.1526, %.1525 + %.1527 =l add %.250, 2090 + storeh 0, %.1527 + %.1528 =l add %.250, 2092 + %.1529 =w copy 0 + storew %.1529, %.1528 + %.1530 =l add %.250, 2096 + %.1531 =w copy 1 + storew %.1531, %.1530 + %.1532 =l add %.250, 2100 + %.1533 =w copy 1103311892 + storew %.1533, %.1532 + %.1534 =l add %.250, 2104 + %.1535 =w copy 1 + storew %.1535, %.1534 + %.1536 =l add %.250, 
2108 + %.1537 =w copy 1 + storeh %.1537, %.1536 + %.1538 =l add %.250, 2110 + storeh 0, %.1538 + %.1539 =l add %.250, 2112 + %.1540 =w copy 704967764 + storew %.1540, %.1539 + %.1541 =l add %.250, 2116 + %.1542 =w copy 7 + storew %.1542, %.1541 + %.1543 =l add %.250, 2120 + %.1544 =w copy 1 + storew %.1544, %.1543 + %.1545 =l add %.250, 2124 + %.1546 =w copy 2842899806 + storew %.1546, %.1545 + %.1547 =l add %.250, 2128 + %.1548 =l extsw 0 + %.1549 =l sub %.1548, 5 + %.1550 =w copy %.1549 + storeh %.1550, %.1547 + %.1551 =l add %.250, 2130 + storeh 0, %.1551 + %.1552 =l add %.250, 2132 + %.1553 =l extsw 0 + %.1554 =l sub %.1553, 7 + %.1555 =w copy %.1554 + storew %.1555, %.1552 + %.1556 =l add %.250, 2136 + %.1557 =w copy 3779923269 + storew %.1557, %.1556 + %.1558 =l add %.250, 2140 + %.1559 =w copy 7 + storew %.1559, %.1558 + %.1560 =l add %.250, 2144 + %.1561 =w copy 1 + storew %.1561, %.1560 + %.1562 =l add %.250, 2148 + %.1563 =w copy 56340 + storeh %.1563, %.1562 + %.1564 =l add %.250, 2150 + storeh 0, %.1564 + %.1565 =l add %.250, 2152 + %.1566 =w copy 3253414294 + storew %.1566, %.1565 + %.1567 =l add %.250, 2156 + %.1568 =w copy 3590563017 + storew %.1568, %.1567 + %.1569 =l add %.250, 2160 + %.1570 =l extsw 0 + %.1571 =l sub %.1570, 1 + %.1572 =w copy %.1571 + storew %.1572, %.1569 + %.1573 =l add %.250, 2164 + %.1574 =w copy 805176143 + storew %.1574, %.1573 + %.1575 =l add %.250, 2168 + %.1576 =w copy 9977 + storeh %.1576, %.1575 + %.1577 =l add %.250, 2170 + storeh 0, %.1577 + %.1578 =l add %.250, 2172 + %.1579 =w copy 714761159 + storew %.1579, %.1578 + %.1580 =l add %.250, 2176 + %.1581 =w copy 4 + storew %.1581, %.1580 + %.1582 =l add %.250, 2180 + %.1583 =w copy 3 + storew %.1583, %.1582 + %.1584 =l add %.250, 2184 + %.1585 =w copy 0 + storew %.1585, %.1584 + %.1586 =l add %.250, 2188 + %.1587 =w copy 1 + storeh %.1587, %.1586 + %.1588 =l add %.250, 2190 + storeh 0, %.1588 + %.1589 =l add %.250, 2192 + %.1590 =w copy 3107308236 + storew %.1590, 
%.1589 + %.1591 =l add %.250, 2196 + %.1592 =w copy 1 + storew %.1592, %.1591 + %.1593 =l add %.250, 2200 + %.1594 =w copy 1103311892 + storew %.1594, %.1593 + %.1595 =l add %.250, 2204 + %.1596 =w copy 1 + storew %.1596, %.1595 + %.1597 =l add %.250, 2208 + %.1598 =w copy 1 + storeh %.1598, %.1597 + %.1599 =l add %.250, 2210 + storeh 0, %.1599 + %.1600 =l add %.250, 2212 + %.1601 =w copy 704967764 + storew %.1601, %.1600 + %.1602 =l add %.250, 2216 + %.1603 =w copy 7 + storew %.1603, %.1602 + %.1604 =l add %.250, 2220 + %.1605 =w copy 3364913714 + storew %.1605, %.1604 + %.1606 =l add %.250, 2224 + %.1607 =w copy 1 + storew %.1607, %.1606 + %.1608 =l add %.250, 2228 + %.1609 =w copy 13821 + storeh %.1609, %.1608 + %.1610 =l add %.250, 2230 + storeh 0, %.1610 + %.1611 =l add %.250, 2232 + %.1612 =w copy 334994584 + storew %.1612, %.1611 + %.1613 =l add %.250, 2236 + %.1614 =w copy 559742891 + storew %.1614, %.1613 + %.1615 =l add %.250, 2240 + %.1616 =w copy 202050518 + storew %.1616, %.1615 + %.1617 =l add %.250, 2244 + %.1618 =w copy 0 + storew %.1618, %.1617 + %.1619 =l add %.250, 2248 + %.1620 =w copy 6474 + storeh %.1620, %.1619 + %.1621 =l add %.250, 2250 + storeh 0, %.1621 + %.1622 =l add %.250, 2252 + %.1623 =l extsw 0 + %.1624 =l sub %.1623, 1 + %.1625 =w copy %.1624 + storew %.1625, %.1622 + %.1626 =l add %.250, 2256 + %.1627 =w copy 0 + storew %.1627, %.1626 + %.1628 =l add %.250, 2260 + %.1629 =w copy 202050518 + storew %.1629, %.1628 + %.1630 =l add %.250, 2264 + %.1631 =w copy 0 + storew %.1631, %.1630 + %.1632 =l add %.250, 2268 + %.1633 =w copy 6474 + storeh %.1633, %.1632 + %.1634 =l add %.250, 2270 + storeh 0, %.1634 + %.1635 =l add %.250, 2272 + %.1636 =l extsw 0 + %.1637 =l sub %.1636, 1 + %.1638 =w copy %.1637 + storew %.1638, %.1635 + %.1639 =l add %.250, 2276 + %.1640 =w copy 0 + storew %.1640, %.1639 + %.1641 =l add %.250, 2280 + %.1642 =w copy 1 + storew %.1642, %.1641 + %.1643 =l add %.250, 2284 + %.1644 =w copy 2536799018 + storew %.1644, 
%.1643 + %.1645 =l add %.250, 2288 + %.1646 =w copy 8956 + storeh %.1646, %.1645 + %.1647 =l add %.250, 2290 + storeh 0, %.1647 + %.1648 =l add %.250, 2292 + %.1649 =w copy 806078605 + storew %.1649, %.1648 + %.1650 =l add %.250, 2296 + %.1651 =w copy 2894914844 + storew %.1651, %.1650 + %.1652 =l add %.250, 2300 + %.1653 =w copy 3902700085 + storew %.1653, %.1652 + %.1654 =l add %.250, 2304 + %.1655 =w copy 6 + storew %.1655, %.1654 + %.1656 =l add %.250, 2308 + %.1657 =l extsw 0 + %.1658 =l sub %.1657, 10 + %.1659 =w copy %.1658 + storeh %.1659, %.1656 + %.1660 =l add %.250, 2310 + storeh 0, %.1660 + %.1661 =l add %.250, 2312 + %.1662 =w copy 1449819268 + storew %.1662, %.1661 + %.1663 =l add %.250, 2316 + %.1664 =w copy 18446744073709551615 + storew %.1664, %.1663 + %.1665 =l add %.250, 2320 + %.1666 =w copy 3902700085 + storew %.1666, %.1665 + %.1667 =l add %.250, 2324 + %.1668 =w copy 6 + storew %.1668, %.1667 + %.1669 =l add %.250, 2328 + %.1670 =l extsw 0 + %.1671 =l sub %.1670, 10 + %.1672 =w copy %.1671 + storeh %.1672, %.1669 + %.1673 =l add %.250, 2330 + storeh 0, %.1673 + %.1674 =l add %.250, 2332 + %.1675 =w copy 1449819268 + storew %.1675, %.1674 + %.1676 =l add %.250, 2336 + %.1677 =w copy 18446744073709551615 + storew %.1677, %.1676 + %.1678 =l add %.250, 2340 + %.1679 =w copy 1 + storew %.1679, %.1678 + %.1680 =l add %.250, 2344 + %.1681 =w copy 2536799018 + storew %.1681, %.1680 + %.1682 =l add %.250, 2348 + %.1683 =w copy 8956 + storeh %.1683, %.1682 + %.1684 =l add %.250, 2350 + storeh 0, %.1684 + %.1685 =l add %.250, 2352 + %.1686 =w copy 806078605 + storew %.1686, %.1685 + %.1687 =l add %.250, 2356 + %.1688 =w copy 2894914844 + storew %.1688, %.1687 + %.1689 =l add %.250, 2360 + %.1690 =w copy 9 + storew %.1690, %.1689 + %.1691 =l add %.250, 2364 + %.1692 =w copy 2313779975 + storew %.1692, %.1691 + %.1693 =l add %.250, 2368 + %.1694 =w copy 26682 + storeh %.1694, %.1693 + %.1695 =l add %.250, 2370 + storeh 0, %.1695 + %.1696 =l add %.250, 
2372 + %.1697 =w copy 0 + storew %.1697, %.1696 + %.1698 =l add %.250, 2376 + %.1699 =w copy 18446744073709551612 + storew %.1699, %.1698 + %.1700 =l add %.250, 2380 + %.1701 =l extsw 0 + %.1702 =l sub %.1701, 1 + %.1703 =w copy %.1702 + storew %.1703, %.1700 + %.1704 =l add %.250, 2384 + %.1705 =w copy 2953570971 + storew %.1705, %.1704 + %.1706 =l add %.250, 2388 + %.1707 =w copy 55280 + storeh %.1707, %.1706 + %.1708 =l add %.250, 2390 + storeh 0, %.1708 + %.1709 =l add %.250, 2392 + %.1710 =w copy 890946016 + storew %.1710, %.1709 + %.1711 =l add %.250, 2396 + %.1712 =w copy 1 + storew %.1712, %.1711 + %.1713 =l add %.250, 2400 + %.1714 =l extsw 0 + %.1715 =l sub %.1714, 1 + %.1716 =w copy %.1715 + storew %.1716, %.1713 + %.1717 =l add %.250, 2404 + %.1718 =w copy 70130414 + storew %.1718, %.1717 + %.1719 =l add %.250, 2408 + %.1720 =w copy 4963 + storeh %.1720, %.1719 + %.1721 =l add %.250, 2410 + storeh 0, %.1721 + %.1722 =l add %.250, 2412 + %.1723 =l extsw 0 + %.1724 =l sub %.1723, 1 + %.1725 =w copy %.1724 + storew %.1725, %.1722 + %.1726 =l add %.250, 2416 + %.1727 =w copy 1686473211 + storew %.1727, %.1726 + %.1728 =l add %.250, 2420 + %.1729 =w copy 1963360965 + storew %.1729, %.1728 + %.1730 =l add %.250, 2424 + %.1731 =w copy 3550624554 + storew %.1731, %.1730 + %.1732 =l add %.250, 2428 + %.1733 =l extsw 0 + %.1734 =l sub %.1733, 1 + %.1735 =w copy %.1734 + storeh %.1735, %.1732 + %.1736 =l add %.250, 2430 + storeh 0, %.1736 + %.1737 =l add %.250, 2432 + %.1738 =w copy 733588941 + storew %.1738, %.1737 + %.1739 =l add %.250, 2436 + %.1740 =w copy 1 + storew %.1740, %.1739 + %.1741 =l add %.250, 2440 + %.1742 =w copy 0 + storew %.1742, %.1741 + %.1743 =l add %.250, 2444 + %.1744 =w copy 18446744073709551613 + storew %.1744, %.1743 + %.1745 =l add %.250, 2448 + %.1746 =l extsw 0 + %.1747 =l sub %.1746, 7 + %.1748 =w copy %.1747 + storeh %.1748, %.1745 + %.1749 =l add %.250, 2450 + storeh 0, %.1749 + %.1750 =l add %.250, 2452 + %.1751 =w copy 4174508792 
+ storew %.1751, %.1750 + %.1752 =l add %.250, 2456 + %.1753 =w copy 18446744073709551607 + storew %.1753, %.1752 + %.1754 =l add %.250, 2460 + %.1755 =w copy 7 + storew %.1755, %.1754 + %.1756 =l add %.250, 2464 + %.1757 =w copy 1 + storew %.1757, %.1756 + %.1758 =l add %.250, 2468 + %.1759 =w copy 56340 + storeh %.1759, %.1758 + %.1760 =l add %.250, 2470 + storeh 0, %.1760 + %.1761 =l add %.250, 2472 + %.1762 =w copy 3253414294 + storew %.1762, %.1761 + %.1763 =l add %.250, 2476 + %.1764 =w copy 3590563017 + storew %.1764, %.1763 + %.1765 =l add %.250, 2480 + %.1766 =w copy 3364913714 + storew %.1766, %.1765 + %.1767 =l add %.250, 2484 + %.1768 =w copy 1 + storew %.1768, %.1767 + %.1769 =l add %.250, 2488 + %.1770 =w copy 13821 + storeh %.1770, %.1769 + %.1771 =l add %.250, 2490 + storeh 0, %.1771 + %.1772 =l add %.250, 2492 + %.1773 =w copy 334994584 + storew %.1773, %.1772 + %.1774 =l add %.250, 2496 + %.1775 =w copy 559742891 + storew %.1775, %.1774 + %.1776 =l add %.250, 2500 + %.1777 =w copy 1 + storew %.1777, %.1776 + %.1778 =l add %.250, 2504 + %.1779 =w copy 790890217 + storew %.1779, %.1778 + %.1780 =l add %.250, 2508 + %.1781 =w copy 7776 + storeh %.1781, %.1780 + %.1782 =l add %.250, 2510 + storeh 0, %.1782 + %.1783 =l add %.250, 2512 + %.1784 =w copy 1603143842 + storew %.1784, %.1783 + %.1785 =l add %.250, 2516 + %.1786 =w copy 1259960115 + storew %.1786, %.1785 + %.1787 =l add %.250, 2520 + %.1788 =w copy 1 + storew %.1788, %.1787 + %.1789 =l add %.250, 2524 + %.1790 =w copy 18446744073709551615 + storew %.1790, %.1789 + %.1791 =l add %.250, 2528 + %.1792 =w copy 35279 + storeh %.1792, %.1791 + %.1793 =l add %.250, 2530 + storeh 0, %.1793 + %.1794 =l add %.250, 2532 + %.1795 =l extsw 0 + %.1796 =l sub %.1795, 5 + %.1797 =w copy %.1796 + storew %.1797, %.1794 + %.1798 =l add %.250, 2536 + %.1799 =w copy 1022186559 + storew %.1799, %.1798 + %.1800 =l add %.250, 2540 + %.1801 =w copy 804387281 + storew %.1801, %.1800 + %.1802 =l add %.250, 2544 + 
%.1803 =w copy 2402775829 + storew %.1803, %.1802 + %.1804 =l add %.250, 2548 + %.1805 =w copy 1 + storeh %.1805, %.1804 + %.1806 =l add %.250, 2550 + storeh 0, %.1806 + %.1807 =l add %.250, 2552 + %.1808 =w copy 330816246 + storew %.1808, %.1807 + %.1809 =l add %.250, 2556 + %.1810 =w copy 3830945193 + storew %.1810, %.1809 + %.1811 =l add %.250, 2560 + %.1812 =w copy 1 + storew %.1812, %.1811 + %.1813 =l add %.250, 2564 + %.1814 =w copy 18446744073709551615 + storew %.1814, %.1813 + %.1815 =l add %.250, 2568 + %.1816 =w copy 1 + storeh %.1816, %.1815 + %.1817 =l add %.250, 2570 + storeh 0, %.1817 + %.1818 =l add %.250, 2572 + %.1819 =w copy 4158742492 + storew %.1819, %.1818 + %.1820 =l add %.250, 2576 + %.1821 =w copy 18446744073709551615 + storew %.1821, %.1820 + %.1822 =l add %.250, 2580 + %.1823 =w copy 0 + storew %.1823, %.1822 + %.1824 =l add %.250, 2584 + %.1825 =w copy 0 + storew %.1825, %.1824 + %.1826 =l add %.250, 2588 + %.1827 =w copy 29620 + storeh %.1827, %.1826 + %.1828 =l add %.250, 2590 + storeh 0, %.1828 + %.1829 =l add %.250, 2592 + %.1830 =w copy 3777737321 + storew %.1830, %.1829 + %.1831 =l add %.250, 2596 + %.1832 =w copy 3251181128 + storew %.1832, %.1831 + %.1833 =l add %.250, 2600 + %.1834 =w copy 3364913714 + storew %.1834, %.1833 + %.1835 =l add %.250, 2604 + %.1836 =w copy 1 + storew %.1836, %.1835 + %.1837 =l add %.250, 2608 + %.1838 =w copy 13821 + storeh %.1838, %.1837 + %.1839 =l add %.250, 2610 + storeh 0, %.1839 + %.1840 =l add %.250, 2612 + %.1841 =w copy 334994584 + storew %.1841, %.1840 + %.1842 =l add %.250, 2616 + %.1843 =w copy 559742891 + storew %.1843, %.1842 + %.1844 =l add %.250, 2620 + %.1845 =l extsw 0 + %.1846 =l sub %.1845, 1 + %.1847 =w copy %.1846 + storew %.1847, %.1844 + %.1848 =l add %.250, 2624 + %.1849 =w copy 805176143 + storew %.1849, %.1848 + %.1850 =l add %.250, 2628 + %.1851 =w copy 9977 + storeh %.1851, %.1850 + %.1852 =l add %.250, 2630 + storeh 0, %.1852 + %.1853 =l add %.250, 2632 + %.1854 =w copy 
714761159 + storew %.1854, %.1853 + %.1855 =l add %.250, 2636 + %.1856 =w copy 4 + storew %.1856, %.1855 + %.1857 =l add %.250, 2640 + %.1858 =w copy 738655802 + storew %.1858, %.1857 + %.1859 =l add %.250, 2644 + %.1860 =w copy 0 + storew %.1860, %.1859 + %.1861 =l add %.250, 2648 + %.1862 =w copy 1 + storeh %.1862, %.1861 + %.1863 =l add %.250, 2650 + storeh 0, %.1863 + %.1864 =l add %.250, 2652 + %.1865 =l extsw 0 + %.1866 =l sub %.1865, 3 + %.1867 =w copy %.1866 + storew %.1867, %.1864 + %.1868 =l add %.250, 2656 + %.1869 =w copy 7 + storew %.1869, %.1868 + %.1870 =l add %.250, 2660 + %.1871 =w copy 1963360965 + storew %.1871, %.1870 + %.1872 =l add %.250, 2664 + %.1873 =w copy 3550624554 + storew %.1873, %.1872 + %.1874 =l add %.250, 2668 + %.1875 =l extsw 0 + %.1876 =l sub %.1875, 1 + %.1877 =w copy %.1876 + storeh %.1877, %.1874 + %.1878 =l add %.250, 2670 + storeh 0, %.1878 + %.1879 =l add %.250, 2672 + %.1880 =w copy 733588941 + storew %.1880, %.1879 + %.1881 =l add %.250, 2676 + %.1882 =w copy 1 + storew %.1882, %.1881 + %.1883 =l add %.250, 2680 + %.1884 =l extsw 0 + %.1885 =l sub %.1884, 7 + %.1886 =w copy %.1885 + storew %.1886, %.1883 + %.1887 =l add %.250, 2684 + %.1888 =w copy 1857691956 + storew %.1888, %.1887 + %.1889 =l add %.250, 2688 + %.1890 =w copy 0 + storeh %.1890, %.1889 + %.1891 =l add %.250, 2690 + storeh 0, %.1891 + %.1892 =l add %.250, 2692 + %.1893 =w copy 1683517642 + storew %.1893, %.1892 + %.1894 =l add %.250, 2696 + %.1895 =w copy 172710452 + storew %.1895, %.1894 + %.1896 =l add %.250, 2700 + %.1897 =l extsw 0 + %.1898 =l sub %.1897, 8 + %.1899 =w copy %.1898 + storew %.1899, %.1896 + %.1900 =l add %.250, 2704 + %.1901 =w copy 239898201 + storew %.1901, %.1900 + %.1902 =l add %.250, 2708 + %.1903 =w copy 15795 + storeh %.1903, %.1902 + %.1904 =l add %.250, 2710 + storeh 0, %.1904 + %.1905 =l add %.250, 2712 + %.1906 =w copy 0 + storew %.1906, %.1905 + %.1907 =l add %.250, 2716 + %.1908 =w copy 1 + storew %.1908, %.1907 + %.1909 
=l add %.250, 2720 + %.1910 =w copy 9 + storew %.1910, %.1909 + %.1911 =l add %.250, 2724 + %.1912 =w copy 2313779975 + storew %.1912, %.1911 + %.1913 =l add %.250, 2728 + %.1914 =w copy 26682 + storeh %.1914, %.1913 + %.1915 =l add %.250, 2730 + storeh 0, %.1915 + %.1916 =l add %.250, 2732 + %.1917 =w copy 0 + storew %.1917, %.1916 + %.1918 =l add %.250, 2736 + %.1919 =w copy 18446744073709551612 + storew %.1919, %.1918 + %.1920 =l add %.250, 2740 + %.1921 =w copy 3 + storew %.1921, %.1920 + %.1922 =l add %.250, 2744 + %.1923 =w copy 18446744073709551613 + storew %.1923, %.1922 + %.1924 =l add %.250, 2748 + %.1925 =l extsw 0 + %.1926 =l sub %.1925, 1 + %.1927 =w copy %.1926 + storeh %.1927, %.1924 + %.1928 =l add %.250, 2750 + storeh 0, %.1928 + %.1929 =l add %.250, 2752 + %.1930 =w copy 3074106023 + storew %.1930, %.1929 + %.1931 =l add %.250, 2756 + %.1932 =w copy 0 + storew %.1932, %.1931 + %.1933 =l add %.250, 2760 + %.1934 =w copy 2844539373 + storew %.1934, %.1933 + %.1935 =l add %.250, 2764 + %.1936 =w copy 3196485425 + storew %.1936, %.1935 + %.1937 =l add %.250, 2768 + %.1938 =l extsw 0 + %.1939 =l sub %.1938, 1 + %.1940 =w copy %.1939 + storeh %.1940, %.1937 + %.1941 =l add %.250, 2770 + storeh 0, %.1941 + %.1942 =l add %.250, 2772 + %.1943 =w copy 0 + storew %.1943, %.1942 + %.1944 =l add %.250, 2776 + %.1945 =w copy 754300143 + storew %.1945, %.1944 + %.1946 =l add %.250, 2780 + %.1947 =w copy 3902700085 + storew %.1947, %.1946 + %.1948 =l add %.250, 2784 + %.1949 =w copy 6 + storew %.1949, %.1948 + %.1950 =l add %.250, 2788 + %.1951 =l extsw 0 + %.1952 =l sub %.1951, 10 + %.1953 =w copy %.1952 + storeh %.1953, %.1950 + %.1954 =l add %.250, 2790 + storeh 0, %.1954 + %.1955 =l add %.250, 2792 + %.1956 =w copy 1449819268 + storew %.1956, %.1955 + %.1957 =l add %.250, 2796 + %.1958 =w copy 18446744073709551615 + storew %.1958, %.1957 + %.1959 =l add %.250, 2800 + %.1960 =w copy 732249490 + storew %.1960, %.1959 + %.1961 =l add %.250, 2804 + %.1962 =w copy 
18446744073709551609 + storew %.1962, %.1961 + %.1963 =l add %.250, 2808 + %.1964 =w copy 32232 + storeh %.1964, %.1963 + %.1965 =l add %.250, 2810 + storeh 0, %.1965 + %.1966 =l add %.250, 2812 + %.1967 =w copy 0 + storew %.1967, %.1966 + %.1968 =l add %.250, 2816 + %.1969 =w copy 1338704947 + storew %.1969, %.1968 + %.1970 =l add %.250, 2820 + %.1971 =w copy 1 + storew %.1971, %.1970 + %.1972 =l add %.250, 2824 + %.1973 =w copy 18446744073709551615 + storew %.1973, %.1972 + %.1974 =l add %.250, 2828 + %.1975 =w copy 1 + storeh %.1975, %.1974 + %.1976 =l add %.250, 2830 + storeh 0, %.1976 + %.1977 =l add %.250, 2832 + %.1978 =w copy 4158742492 + storew %.1978, %.1977 + %.1979 =l add %.250, 2836 + %.1980 =w copy 18446744073709551615 + storew %.1980, %.1979 + %.1981 =l add %.250, 2840 + %.1982 =w copy 202050518 + storew %.1982, %.1981 + %.1983 =l add %.250, 2844 + %.1984 =w copy 0 + storew %.1984, %.1983 + %.1985 =l add %.250, 2848 + %.1986 =w copy 6474 + storeh %.1986, %.1985 + %.1987 =l add %.250, 2850 + storeh 0, %.1987 + %.1988 =l add %.250, 2852 + %.1989 =l extsw 0 + %.1990 =l sub %.1989, 1 + %.1991 =w copy %.1990 + storew %.1991, %.1988 + %.1992 =l add %.250, 2856 + %.1993 =w copy 0 + storew %.1993, %.1992 + %.1994 =l add %.250, 2860 + %.1995 =w copy 0 + storew %.1995, %.1994 + %.1996 =l add %.250, 2864 + %.1997 =w copy 1 + storew %.1997, %.1996 + %.1998 =l add %.250, 2868 + %.1999 =w copy 25431 + storeh %.1999, %.1998 + %.2000 =l add %.250, 2870 + storeh 0, %.2000 + %.2001 =l add %.250, 2872 + %.2002 =w copy 3588134414 + storew %.2002, %.2001 + %.2003 =l add %.250, 2876 + %.2004 =w copy 8 + storew %.2004, %.2003 + %.2006 =l add %.2005, 0 + %.2007 =l extsw 4 + %.2008 =l mul %.2007, 1 + %.2009 =l add $g_132, %.2008 + storel %.2009, %.2006 + %.2011 =l add %.2010, 0 + %.2012 =w copy 620157876 + storew %.2012, %.2011 + %.2014 =l add %.2013, 0 + storel $g_265, %.2014 + %.2015 =l add %.2013, 8 + storel $g_265, %.2015 + %.2016 =l add %.2013, 16 + storel $g_265, 
%.2016 + %.2017 =l add %.2013, 24 + storel $g_265, %.2017 + %.2018 =l add %.2013, 32 + storel $g_265, %.2018 + %.2019 =l add %.2013, 40 + storel $g_265, %.2019 + %.2020 =l add %.2013, 48 + storel $g_265, %.2020 + %.2021 =l add %.2013, 56 + storel $g_265, %.2021 + %.2022 =l add %.2013, 64 + storel $g_265, %.2022 + %.2024 =l add %.2023, 0 + %.2025 =w copy 248 + storeb %.2025, %.2024 + %.2027 =l add %.2026, 0 + %.2028 =l extsw 0 + %.2029 =l copy %.2028 + storel %.2029, %.2027 + %.2031 =l add %.2030, 0 + %.2032 =w copy 2742309445 + storew %.2032, %.2031 + %.2034 =l add %.2033, 0 + %.2035 =w copy 0 + storew %.2035, %.2034 + %.2037 =l add %.2036, 0 + %.2038 =l copy $g_265 + %.2039 =l mul 16, 1 + %.2040 =l add %.2038, %.2039 + %.2041 =l copy %.2040 + storel %.2041, %.2037 + %.2043 =l add %.2042, 0 + %.2044 =w copy 196 + storeb %.2044, %.2043 + %.2046 =l add %.2045, 0 + storel $g_130, %.2046 +@lbl_234.1237 + storel $g_23, $g_38 + %.2050 =l extsw 3 + %.2051 =l mul %.2050, 320 + %.2052 =l add %.7, %.2051 + %.2053 =l extsw 3 + %.2054 =l mul %.2053, 64 + %.2055 =l add %.2052, %.2054 + %.2056 =l extsw 1 + %.2057 =l mul %.2056, 8 + %.2058 =l add %.2055, %.2057 + %.2059 =l loadl %.2058 + %.2060 =l loadl %.248 + %.2061 =w loadub %.2060 + %.2062 =w sub %.2061, 1 + storeb %.2062, %.2060 + %.2063 =w call $func_41(l %.2059, w %.2062) + %.2064 =w loaduw %.4 + %.2065 =w copy %.2064 + %.2066 =w call $func_51(w %.2065) + %.2067 =w extsb %.2066 + %.2068 =w cnew %.2067, 0 + jnz %.2068, @logic_right.1240, @logic_join.1241 +@logic_right.1240 + %.2069 =l copy 18446744073709551615 + %.2070 =l call $safe_unary_minus_func_uint64_t_u(l %.2069) + %.2071 =l extsw 2 + %.2072 =l mul %.2071, 360 + %.2073 =l add %.250, %.2072 + %.2074 =l extsw 1 + %.2075 =l mul %.2074, 120 + %.2076 =l add %.2073, %.2075 + %.2077 =l extsw 1 + %.2078 =l mul %.2077, 20 + %.2079 =l add %.2076, %.2078 + %.2080 =l loadl %.248 + %.2081 =w ceql %.6, %.2080 + %.2082 =w loadub %.6 + %.2083 =w extub %.2082 + %.2084 =w ceqw %.2081, 
%.2083 + %.2085 =l extsw 0 + %.2086 =w cnel %.2085, $g_46 + %.2087 =w copy %.2086 + %.2088 =l loadl %.2005 + storeb %.2087, %.2088 + %.2089 =l loadl $g_82 + %.2090 =w copy %.2089 + %.2091 =w call $safe_mul_func_int8_t_s_s(w %.2087, w %.2090) + %.2092 =w copy 247 + %.2093 =w call $safe_div_func_int8_t_s_s(w %.2091, w %.2092) + %.2094 =w loadsh $g_81 + %.2095 =w copy %.2094 + %.2096 =w call $safe_rshift_func_int8_t_s_s(w %.2095, w 4) + %.2097 =l extsb %.2096 + %.2098 =w csgel 129, %.2097 + %.2099 =l loadl %.2 + %.2100 =w loadsw %.2099 + %.2101 =l loadl $g_23 + %.2102 =w loadsw %.2101 + %.2103 =w call $safe_div_func_int32_t_s_s(w %.2100, w %.2102) + %.2104 =l extsw %.2103 + %.2105 =w cugtl %.2070, %.2104 + %.2106 =w cnew %.2105, 0 +@logic_join.1241 + %.2107 =w phi @lbl_234.1237 %.2068, @logic_right.1240 %.2106 + %.2108 =w cnew %.2107, 0 + jnz %.2108, @logic_right.1238, @logic_join.1239 +@logic_right.1238 + %.2109 =l loadl $g_80 + %.2110 =w cnel %.2109, 0 +@logic_join.1239 + %.2111 =w phi @logic_join.1241 %.2108, @logic_right.1238 %.2110 + %.2112 =w copy %.2111 + %.2113 =l copy $g_130 + %.2114 =l mul 12, 1 + %.2115 =l add %.2113, %.2114 + %.2116 =l copy %.2115 + %.2117 =w loadsw %.2116 + %.2118 =w copy %.2117 + %.2119 =w call $safe_lshift_func_uint8_t_u_u(w %.2112, w %.2118) + %.2120 =w extub %.2119 + %.2121 =w cnew %.2120, 0 + jnz %.2121, @if_true.1242, @if_false.1243 +@if_true.1242 + %.2123 =l add %.2122, 0 + %.2124 =w copy 4285949620 + storew %.2124, %.2123 + %.2126 =l add %.2125, 0 + %.2127 =w copy 613107830 + storew %.2127, %.2126 + %.2129 =l add %.2128, 0 + storel $g_23, %.2129 + %.2131 =l add %.2130, 0 + storel $g_185, %.2131 + %.2133 =l add %.2132, 0 + %.2134 =l extsw 2 + %.2135 =l mul %.2134, 360 + %.2136 =l add %.250, %.2135 + %.2137 =l extsw 1 + %.2138 =l mul %.2137, 120 + %.2139 =l add %.2136, %.2138 + %.2140 =l extsw 1 + %.2141 =l mul %.2140, 20 + %.2142 =l add %.2139, %.2141 + storel %.2142, %.2133 + %.2144 =l add %.2143, 0 + storel $g_84, %.2144 + %.2147 
=l add %.2146, 0 + %.2148 =w copy 18446744073709551608 + storew %.2148, %.2147 + %.2150 =l add %.2149, 0 + %.2151 =w copy 6 + storew %.2151, %.2150 + %.2154 =l add %.2153, 0 + storel $g_81, %.2154 + %.2155 =l add %.2153, 8 + storel $g_81, %.2155 + %.2156 =l add %.2153, 16 + storel $g_81, %.2156 + %.2157 =l add %.2153, 24 + storel $g_81, %.2157 + %.2158 =l add %.2153, 32 + storel $g_81, %.2158 + %.2159 =l add %.2153, 40 + %.2160 =l extsw 2 + %.2161 =l mul %.2160, 360 + %.2162 =l add %.250, %.2161 + %.2163 =l extsw 1 + %.2164 =l mul %.2163, 120 + %.2165 =l add %.2162, %.2164 + %.2166 =l extsw 1 + %.2167 =l mul %.2166, 20 + %.2168 =l add %.2165, %.2167 + %.2169 =l copy %.2168 + %.2170 =l mul 8, 1 + %.2171 =l add %.2169, %.2170 + %.2172 =l copy %.2171 + storel %.2172, %.2159 + %.2173 =l add %.2153, 48 + %.2174 =l extsw 2 + %.2175 =l mul %.2174, 360 + %.2176 =l add %.250, %.2175 + %.2177 =l extsw 1 + %.2178 =l mul %.2177, 120 + %.2179 =l add %.2176, %.2178 + %.2180 =l extsw 1 + %.2181 =l mul %.2180, 20 + %.2182 =l add %.2179, %.2181 + %.2183 =l copy %.2182 + %.2184 =l mul 8, 1 + %.2185 =l add %.2183, %.2184 + %.2186 =l copy %.2185 + storel %.2186, %.2173 + %.2187 =l add %.2153, 56 + %.2188 =l extsw 2 + %.2189 =l mul %.2188, 360 + %.2190 =l add %.250, %.2189 + %.2191 =l extsw 1 + %.2192 =l mul %.2191, 120 + %.2193 =l add %.2190, %.2192 + %.2194 =l extsw 1 + %.2195 =l mul %.2194, 20 + %.2196 =l add %.2193, %.2195 + %.2197 =l copy %.2196 + %.2198 =l mul 8, 1 + %.2199 =l add %.2197, %.2198 + %.2200 =l copy %.2199 + storel %.2200, %.2187 + %.2201 =l add %.2153, 64 + %.2202 =l extsw 2 + %.2203 =l mul %.2202, 360 + %.2204 =l add %.250, %.2203 + %.2205 =l extsw 1 + %.2206 =l mul %.2205, 120 + %.2207 =l add %.2204, %.2206 + %.2208 =l extsw 1 + %.2209 =l mul %.2208, 20 + %.2210 =l add %.2207, %.2209 + %.2211 =l copy %.2210 + %.2212 =l mul 8, 1 + %.2213 =l add %.2211, %.2212 + %.2214 =l copy %.2213 + storel %.2214, %.2201 + %.2215 =l add %.2153, 72 + %.2216 =l extsw 2 + %.2217 =l 
mul %.2216, 360 + %.2218 =l add %.250, %.2217 + %.2219 =l extsw 1 + %.2220 =l mul %.2219, 120 + %.2221 =l add %.2218, %.2220 + %.2222 =l extsw 1 + %.2223 =l mul %.2222, 20 + %.2224 =l add %.2221, %.2223 + %.2225 =l copy %.2224 + %.2226 =l mul 8, 1 + %.2227 =l add %.2225, %.2226 + %.2228 =l copy %.2227 + storel %.2228, %.2215 + %.2229 =l add %.2153, 80 + storel $g_81, %.2229 + %.2230 =l add %.2153, 88 + storel $g_81, %.2230 + %.2231 =l add %.2153, 96 + storel $g_81, %.2231 + %.2232 =l add %.2153, 104 + storel $g_81, %.2232 + %.2233 =l add %.2153, 112 + storel $g_81, %.2233 + %.2234 =l add %.2153, 120 + %.2235 =l extsw 2 + %.2236 =l mul %.2235, 360 + %.2237 =l add %.250, %.2236 + %.2238 =l extsw 1 + %.2239 =l mul %.2238, 120 + %.2240 =l add %.2237, %.2239 + %.2241 =l extsw 1 + %.2242 =l mul %.2241, 20 + %.2243 =l add %.2240, %.2242 + %.2244 =l copy %.2243 + %.2245 =l mul 8, 1 + %.2246 =l add %.2244, %.2245 + %.2247 =l copy %.2246 + storel %.2247, %.2234 + %.2248 =l add %.2153, 128 + %.2249 =l extsw 2 + %.2250 =l mul %.2249, 360 + %.2251 =l add %.250, %.2250 + %.2252 =l extsw 1 + %.2253 =l mul %.2252, 120 + %.2254 =l add %.2251, %.2253 + %.2255 =l extsw 1 + %.2256 =l mul %.2255, 20 + %.2257 =l add %.2254, %.2256 + %.2258 =l copy %.2257 + %.2259 =l mul 8, 1 + %.2260 =l add %.2258, %.2259 + %.2261 =l copy %.2260 + storel %.2261, %.2248 + %.2262 =l add %.2153, 136 + %.2263 =l extsw 2 + %.2264 =l mul %.2263, 360 + %.2265 =l add %.250, %.2264 + %.2266 =l extsw 1 + %.2267 =l mul %.2266, 120 + %.2268 =l add %.2265, %.2267 + %.2269 =l extsw 1 + %.2270 =l mul %.2269, 20 + %.2271 =l add %.2268, %.2270 + %.2272 =l copy %.2271 + %.2273 =l mul 8, 1 + %.2274 =l add %.2272, %.2273 + %.2275 =l copy %.2274 + storel %.2275, %.2262 + %.2276 =l add %.2153, 144 + %.2277 =l extsw 2 + %.2278 =l mul %.2277, 360 + %.2279 =l add %.250, %.2278 + %.2280 =l extsw 1 + %.2281 =l mul %.2280, 120 + %.2282 =l add %.2279, %.2281 + %.2283 =l extsw 1 + %.2284 =l mul %.2283, 20 + %.2285 =l add %.2282, 
%.2284 + %.2286 =l copy %.2285 + %.2287 =l mul 8, 1 + %.2288 =l add %.2286, %.2287 + %.2289 =l copy %.2288 + storel %.2289, %.2276 + %.2290 =l add %.2153, 152 + %.2291 =l extsw 2 + %.2292 =l mul %.2291, 360 + %.2293 =l add %.250, %.2292 + %.2294 =l extsw 1 + %.2295 =l mul %.2294, 120 + %.2296 =l add %.2293, %.2295 + %.2297 =l extsw 1 + %.2298 =l mul %.2297, 20 + %.2299 =l add %.2296, %.2298 + %.2300 =l copy %.2299 + %.2301 =l mul 8, 1 + %.2302 =l add %.2300, %.2301 + %.2303 =l copy %.2302 + storel %.2303, %.2290 + %.2304 =l add %.2153, 160 + storel $g_81, %.2304 + %.2305 =l add %.2153, 168 + storel $g_81, %.2305 + %.2306 =l add %.2153, 176 + storel $g_81, %.2306 + %.2307 =l add %.2153, 184 + storel $g_81, %.2307 + %.2308 =l add %.2153, 192 + storel $g_81, %.2308 + %.2309 =l add %.2153, 200 + %.2310 =l extsw 2 + %.2311 =l mul %.2310, 360 + %.2312 =l add %.250, %.2311 + %.2313 =l extsw 1 + %.2314 =l mul %.2313, 120 + %.2315 =l add %.2312, %.2314 + %.2316 =l extsw 1 + %.2317 =l mul %.2316, 20 + %.2318 =l add %.2315, %.2317 + %.2319 =l copy %.2318 + %.2320 =l mul 8, 1 + %.2321 =l add %.2319, %.2320 + %.2322 =l copy %.2321 + storel %.2322, %.2309 + %.2323 =l add %.2153, 208 + %.2324 =l extsw 2 + %.2325 =l mul %.2324, 360 + %.2326 =l add %.250, %.2325 + %.2327 =l extsw 1 + %.2328 =l mul %.2327, 120 + %.2329 =l add %.2326, %.2328 + %.2330 =l extsw 1 + %.2331 =l mul %.2330, 20 + %.2332 =l add %.2329, %.2331 + %.2333 =l copy %.2332 + %.2334 =l mul 8, 1 + %.2335 =l add %.2333, %.2334 + %.2336 =l copy %.2335 + storel %.2336, %.2323 + %.2337 =l add %.2153, 216 + %.2338 =l extsw 2 + %.2339 =l mul %.2338, 360 + %.2340 =l add %.250, %.2339 + %.2341 =l extsw 1 + %.2342 =l mul %.2341, 120 + %.2343 =l add %.2340, %.2342 + %.2344 =l extsw 1 + %.2345 =l mul %.2344, 20 + %.2346 =l add %.2343, %.2345 + %.2347 =l copy %.2346 + %.2348 =l mul 8, 1 + %.2349 =l add %.2347, %.2348 + %.2350 =l copy %.2349 + storel %.2350, %.2337 + %.2351 =l add %.2153, 224 + %.2352 =l extsw 2 + %.2353 =l mul 
%.2352, 360 + %.2354 =l add %.250, %.2353 + %.2355 =l extsw 1 + %.2356 =l mul %.2355, 120 + %.2357 =l add %.2354, %.2356 + %.2358 =l extsw 1 + %.2359 =l mul %.2358, 20 + %.2360 =l add %.2357, %.2359 + %.2361 =l copy %.2360 + %.2362 =l mul 8, 1 + %.2363 =l add %.2361, %.2362 + %.2364 =l copy %.2363 + storel %.2364, %.2351 + %.2365 =l add %.2153, 232 + %.2366 =l extsw 2 + %.2367 =l mul %.2366, 360 + %.2368 =l add %.250, %.2367 + %.2369 =l extsw 1 + %.2370 =l mul %.2369, 120 + %.2371 =l add %.2368, %.2370 + %.2372 =l extsw 1 + %.2373 =l mul %.2372, 20 + %.2374 =l add %.2371, %.2373 + %.2375 =l copy %.2374 + %.2376 =l mul 8, 1 + %.2377 =l add %.2375, %.2376 + %.2378 =l copy %.2377 + storel %.2378, %.2365 + %.2379 =l add %.2153, 240 + storel $g_81, %.2379 + %.2380 =l add %.2153, 248 + storel $g_81, %.2380 + %.2381 =l add %.2153, 256 + storel $g_81, %.2381 + %.2382 =l add %.2153, 264 + storel $g_81, %.2382 + %.2383 =l add %.2153, 272 + storel $g_81, %.2383 + %.2384 =l add %.2153, 280 + %.2385 =l extsw 2 + %.2386 =l mul %.2385, 360 + %.2387 =l add %.250, %.2386 + %.2388 =l extsw 1 + %.2389 =l mul %.2388, 120 + %.2390 =l add %.2387, %.2389 + %.2391 =l extsw 1 + %.2392 =l mul %.2391, 20 + %.2393 =l add %.2390, %.2392 + %.2394 =l copy %.2393 + %.2395 =l mul 8, 1 + %.2396 =l add %.2394, %.2395 + %.2397 =l copy %.2396 + storel %.2397, %.2384 + %.2398 =l add %.2153, 288 + %.2399 =l extsw 2 + %.2400 =l mul %.2399, 360 + %.2401 =l add %.250, %.2400 + %.2402 =l extsw 1 + %.2403 =l mul %.2402, 120 + %.2404 =l add %.2401, %.2403 + %.2405 =l extsw 1 + %.2406 =l mul %.2405, 20 + %.2407 =l add %.2404, %.2406 + %.2408 =l copy %.2407 + %.2409 =l mul 8, 1 + %.2410 =l add %.2408, %.2409 + %.2411 =l copy %.2410 + storel %.2411, %.2398 + %.2412 =l add %.2153, 296 + %.2413 =l extsw 2 + %.2414 =l mul %.2413, 360 + %.2415 =l add %.250, %.2414 + %.2416 =l extsw 1 + %.2417 =l mul %.2416, 120 + %.2418 =l add %.2415, %.2417 + %.2419 =l extsw 1 + %.2420 =l mul %.2419, 20 + %.2421 =l add %.2418, 
%.2420 + %.2422 =l copy %.2421 + %.2423 =l mul 8, 1 + %.2424 =l add %.2422, %.2423 + %.2425 =l copy %.2424 + storel %.2425, %.2412 + %.2426 =l add %.2153, 304 + %.2427 =l extsw 2 + %.2428 =l mul %.2427, 360 + %.2429 =l add %.250, %.2428 + %.2430 =l extsw 1 + %.2431 =l mul %.2430, 120 + %.2432 =l add %.2429, %.2431 + %.2433 =l extsw 1 + %.2434 =l mul %.2433, 20 + %.2435 =l add %.2432, %.2434 + %.2436 =l copy %.2435 + %.2437 =l mul 8, 1 + %.2438 =l add %.2436, %.2437 + %.2439 =l copy %.2438 + storel %.2439, %.2426 + %.2440 =l add %.2153, 312 + %.2441 =l extsw 2 + %.2442 =l mul %.2441, 360 + %.2443 =l add %.250, %.2442 + %.2444 =l extsw 1 + %.2445 =l mul %.2444, 120 + %.2446 =l add %.2443, %.2445 + %.2447 =l extsw 1 + %.2448 =l mul %.2447, 20 + %.2449 =l add %.2446, %.2448 + %.2450 =l copy %.2449 + %.2451 =l mul 8, 1 + %.2452 =l add %.2450, %.2451 + %.2453 =l copy %.2452 + storel %.2453, %.2440 + %.2454 =l add %.2153, 320 + storel $g_81, %.2454 + %.2455 =l add %.2153, 328 + storel $g_81, %.2455 + %.2456 =l add %.2153, 336 + storel $g_81, %.2456 + %.2457 =l add %.2153, 344 + storel $g_81, %.2457 + %.2458 =l add %.2153, 352 + storel $g_81, %.2458 + %.2459 =l add %.2153, 360 + %.2460 =l extsw 2 + %.2461 =l mul %.2460, 360 + %.2462 =l add %.250, %.2461 + %.2463 =l extsw 1 + %.2464 =l mul %.2463, 120 + %.2465 =l add %.2462, %.2464 + %.2466 =l extsw 1 + %.2467 =l mul %.2466, 20 + %.2468 =l add %.2465, %.2467 + %.2469 =l copy %.2468 + %.2470 =l mul 8, 1 + %.2471 =l add %.2469, %.2470 + %.2472 =l copy %.2471 + storel %.2472, %.2459 + %.2473 =l add %.2153, 368 + %.2474 =l extsw 2 + %.2475 =l mul %.2474, 360 + %.2476 =l add %.250, %.2475 + %.2477 =l extsw 1 + %.2478 =l mul %.2477, 120 + %.2479 =l add %.2476, %.2478 + %.2480 =l extsw 1 + %.2481 =l mul %.2480, 20 + %.2482 =l add %.2479, %.2481 + %.2483 =l copy %.2482 + %.2484 =l mul 8, 1 + %.2485 =l add %.2483, %.2484 + %.2486 =l copy %.2485 + storel %.2486, %.2473 + %.2487 =l add %.2153, 376 + %.2488 =l extsw 2 + %.2489 =l mul 
%.2488, 360 + %.2490 =l add %.250, %.2489 + %.2491 =l extsw 1 + %.2492 =l mul %.2491, 120 + %.2493 =l add %.2490, %.2492 + %.2494 =l extsw 1 + %.2495 =l mul %.2494, 20 + %.2496 =l add %.2493, %.2495 + %.2497 =l copy %.2496 + %.2498 =l mul 8, 1 + %.2499 =l add %.2497, %.2498 + %.2500 =l copy %.2499 + storel %.2500, %.2487 + %.2501 =l add %.2153, 384 + %.2502 =l extsw 2 + %.2503 =l mul %.2502, 360 + %.2504 =l add %.250, %.2503 + %.2505 =l extsw 1 + %.2506 =l mul %.2505, 120 + %.2507 =l add %.2504, %.2506 + %.2508 =l extsw 1 + %.2509 =l mul %.2508, 20 + %.2510 =l add %.2507, %.2509 + %.2511 =l copy %.2510 + %.2512 =l mul 8, 1 + %.2513 =l add %.2511, %.2512 + %.2514 =l copy %.2513 + storel %.2514, %.2501 + %.2515 =l add %.2153, 392 + %.2516 =l extsw 2 + %.2517 =l mul %.2516, 360 + %.2518 =l add %.250, %.2517 + %.2519 =l extsw 1 + %.2520 =l mul %.2519, 120 + %.2521 =l add %.2518, %.2520 + %.2522 =l extsw 1 + %.2523 =l mul %.2522, 20 + %.2524 =l add %.2521, %.2523 + %.2525 =l copy %.2524 + %.2526 =l mul 8, 1 + %.2527 =l add %.2525, %.2526 + %.2528 =l copy %.2527 + storel %.2528, %.2515 + %.2529 =l add %.2153, 400 + storel $g_81, %.2529 + %.2530 =l add %.2153, 408 + storel $g_81, %.2530 + %.2531 =l add %.2153, 416 + storel $g_81, %.2531 + %.2532 =l add %.2153, 424 + storel $g_81, %.2532 + %.2533 =l add %.2153, 432 + storel $g_81, %.2533 + %.2534 =l add %.2153, 440 + %.2535 =l extsw 2 + %.2536 =l mul %.2535, 360 + %.2537 =l add %.250, %.2536 + %.2538 =l extsw 1 + %.2539 =l mul %.2538, 120 + %.2540 =l add %.2537, %.2539 + %.2541 =l extsw 1 + %.2542 =l mul %.2541, 20 + %.2543 =l add %.2540, %.2542 + %.2544 =l copy %.2543 + %.2545 =l mul 8, 1 + %.2546 =l add %.2544, %.2545 + %.2547 =l copy %.2546 + storel %.2547, %.2534 + %.2548 =l add %.2153, 448 + %.2549 =l extsw 2 + %.2550 =l mul %.2549, 360 + %.2551 =l add %.250, %.2550 + %.2552 =l extsw 1 + %.2553 =l mul %.2552, 120 + %.2554 =l add %.2551, %.2553 + %.2555 =l extsw 1 + %.2556 =l mul %.2555, 20 + %.2557 =l add %.2554, 
%.2556 + %.2558 =l copy %.2557 + %.2559 =l mul 8, 1 + %.2560 =l add %.2558, %.2559 + %.2561 =l copy %.2560 + storel %.2561, %.2548 + %.2562 =l add %.2153, 456 + %.2563 =l extsw 2 + %.2564 =l mul %.2563, 360 + %.2565 =l add %.250, %.2564 + %.2566 =l extsw 1 + %.2567 =l mul %.2566, 120 + %.2568 =l add %.2565, %.2567 + %.2569 =l extsw 1 + %.2570 =l mul %.2569, 20 + %.2571 =l add %.2568, %.2570 + %.2572 =l copy %.2571 + %.2573 =l mul 8, 1 + %.2574 =l add %.2572, %.2573 + %.2575 =l copy %.2574 + storel %.2575, %.2562 + %.2576 =l add %.2153, 464 + %.2577 =l extsw 2 + %.2578 =l mul %.2577, 360 + %.2579 =l add %.250, %.2578 + %.2580 =l extsw 1 + %.2581 =l mul %.2580, 120 + %.2582 =l add %.2579, %.2581 + %.2583 =l extsw 1 + %.2584 =l mul %.2583, 20 + %.2585 =l add %.2582, %.2584 + %.2586 =l copy %.2585 + %.2587 =l mul 8, 1 + %.2588 =l add %.2586, %.2587 + %.2589 =l copy %.2588 + storel %.2589, %.2576 + %.2590 =l add %.2153, 472 + %.2591 =l extsw 2 + %.2592 =l mul %.2591, 360 + %.2593 =l add %.250, %.2592 + %.2594 =l extsw 1 + %.2595 =l mul %.2594, 120 + %.2596 =l add %.2593, %.2595 + %.2597 =l extsw 1 + %.2598 =l mul %.2597, 20 + %.2599 =l add %.2596, %.2598 + %.2600 =l copy %.2599 + %.2601 =l mul 8, 1 + %.2602 =l add %.2600, %.2601 + %.2603 =l copy %.2602 + storel %.2603, %.2590 + %.2604 =l add %.2153, 480 + storel $g_81, %.2604 + %.2605 =l add %.2153, 488 + storel $g_81, %.2605 + %.2606 =l add %.2153, 496 + storel $g_81, %.2606 + %.2607 =l add %.2153, 504 + storel $g_81, %.2607 + %.2608 =l add %.2153, 512 + storel $g_81, %.2608 + %.2609 =l add %.2153, 520 + %.2610 =l extsw 2 + %.2611 =l mul %.2610, 360 + %.2612 =l add %.250, %.2611 + %.2613 =l extsw 1 + %.2614 =l mul %.2613, 120 + %.2615 =l add %.2612, %.2614 + %.2616 =l extsw 1 + %.2617 =l mul %.2616, 20 + %.2618 =l add %.2615, %.2617 + %.2619 =l copy %.2618 + %.2620 =l mul 8, 1 + %.2621 =l add %.2619, %.2620 + %.2622 =l copy %.2621 + storel %.2622, %.2609 + %.2623 =l add %.2153, 528 + %.2624 =l extsw 2 + %.2625 =l mul 
%.2624, 360 + %.2626 =l add %.250, %.2625 + %.2627 =l extsw 1 + %.2628 =l mul %.2627, 120 + %.2629 =l add %.2626, %.2628 + %.2630 =l extsw 1 + %.2631 =l mul %.2630, 20 + %.2632 =l add %.2629, %.2631 + %.2633 =l copy %.2632 + %.2634 =l mul 8, 1 + %.2635 =l add %.2633, %.2634 + %.2636 =l copy %.2635 + storel %.2636, %.2623 + %.2637 =l add %.2153, 536 + %.2638 =l extsw 2 + %.2639 =l mul %.2638, 360 + %.2640 =l add %.250, %.2639 + %.2641 =l extsw 1 + %.2642 =l mul %.2641, 120 + %.2643 =l add %.2640, %.2642 + %.2644 =l extsw 1 + %.2645 =l mul %.2644, 20 + %.2646 =l add %.2643, %.2645 + %.2647 =l copy %.2646 + %.2648 =l mul 8, 1 + %.2649 =l add %.2647, %.2648 + %.2650 =l copy %.2649 + storel %.2650, %.2637 + %.2651 =l add %.2153, 544 + %.2652 =l extsw 2 + %.2653 =l mul %.2652, 360 + %.2654 =l add %.250, %.2653 + %.2655 =l extsw 1 + %.2656 =l mul %.2655, 120 + %.2657 =l add %.2654, %.2656 + %.2658 =l extsw 1 + %.2659 =l mul %.2658, 20 + %.2660 =l add %.2657, %.2659 + %.2661 =l copy %.2660 + %.2662 =l mul 8, 1 + %.2663 =l add %.2661, %.2662 + %.2664 =l copy %.2663 + storel %.2664, %.2651 + %.2665 =l add %.2153, 552 + %.2666 =l extsw 2 + %.2667 =l mul %.2666, 360 + %.2668 =l add %.250, %.2667 + %.2669 =l extsw 1 + %.2670 =l mul %.2669, 120 + %.2671 =l add %.2668, %.2670 + %.2672 =l extsw 1 + %.2673 =l mul %.2672, 20 + %.2674 =l add %.2671, %.2673 + %.2675 =l copy %.2674 + %.2676 =l mul 8, 1 + %.2677 =l add %.2675, %.2676 + %.2678 =l copy %.2677 + storel %.2678, %.2665 + %.2679 =l add %.2153, 560 + storel $g_81, %.2679 + %.2680 =l add %.2153, 568 + storel $g_81, %.2680 + %.2681 =l add %.2153, 576 + storel $g_81, %.2681 + %.2682 =l add %.2153, 584 + storel $g_81, %.2682 + %.2683 =l add %.2153, 592 + storel $g_81, %.2683 + %.2684 =l add %.2153, 600 + %.2685 =l extsw 2 + %.2686 =l mul %.2685, 360 + %.2687 =l add %.250, %.2686 + %.2688 =l extsw 1 + %.2689 =l mul %.2688, 120 + %.2690 =l add %.2687, %.2689 + %.2691 =l extsw 1 + %.2692 =l mul %.2691, 20 + %.2693 =l add %.2690, 
%.2692 + %.2694 =l copy %.2693 + %.2695 =l mul 8, 1 + %.2696 =l add %.2694, %.2695 + %.2697 =l copy %.2696 + storel %.2697, %.2684 + %.2698 =l add %.2153, 608 + %.2699 =l extsw 2 + %.2700 =l mul %.2699, 360 + %.2701 =l add %.250, %.2700 + %.2702 =l extsw 1 + %.2703 =l mul %.2702, 120 + %.2704 =l add %.2701, %.2703 + %.2705 =l extsw 1 + %.2706 =l mul %.2705, 20 + %.2707 =l add %.2704, %.2706 + %.2708 =l copy %.2707 + %.2709 =l mul 8, 1 + %.2710 =l add %.2708, %.2709 + %.2711 =l copy %.2710 + storel %.2711, %.2698 + %.2712 =l add %.2153, 616 + %.2713 =l extsw 2 + %.2714 =l mul %.2713, 360 + %.2715 =l add %.250, %.2714 + %.2716 =l extsw 1 + %.2717 =l mul %.2716, 120 + %.2718 =l add %.2715, %.2717 + %.2719 =l extsw 1 + %.2720 =l mul %.2719, 20 + %.2721 =l add %.2718, %.2720 + %.2722 =l copy %.2721 + %.2723 =l mul 8, 1 + %.2724 =l add %.2722, %.2723 + %.2725 =l copy %.2724 + storel %.2725, %.2712 + %.2726 =l add %.2153, 624 + %.2727 =l extsw 2 + %.2728 =l mul %.2727, 360 + %.2729 =l add %.250, %.2728 + %.2730 =l extsw 1 + %.2731 =l mul %.2730, 120 + %.2732 =l add %.2729, %.2731 + %.2733 =l extsw 1 + %.2734 =l mul %.2733, 20 + %.2735 =l add %.2732, %.2734 + %.2736 =l copy %.2735 + %.2737 =l mul 8, 1 + %.2738 =l add %.2736, %.2737 + %.2739 =l copy %.2738 + storel %.2739, %.2726 + %.2740 =l add %.2153, 632 + %.2741 =l extsw 2 + %.2742 =l mul %.2741, 360 + %.2743 =l add %.250, %.2742 + %.2744 =l extsw 1 + %.2745 =l mul %.2744, 120 + %.2746 =l add %.2743, %.2745 + %.2747 =l extsw 1 + %.2748 =l mul %.2747, 20 + %.2749 =l add %.2746, %.2748 + %.2750 =l copy %.2749 + %.2751 =l mul 8, 1 + %.2752 =l add %.2750, %.2751 + %.2753 =l copy %.2752 + storel %.2753, %.2740 + %.2754 =l add %.2153, 640 + storel $g_81, %.2754 + %.2755 =l add %.2153, 648 + storel $g_81, %.2755 + %.2756 =l add %.2153, 656 + storel $g_81, %.2756 + %.2757 =l add %.2153, 664 + storel $g_81, %.2757 + %.2758 =l add %.2153, 672 + storel $g_81, %.2758 + %.2759 =l add %.2153, 680 + %.2760 =l extsw 2 + %.2761 =l mul 
%.2760, 360 + %.2762 =l add %.250, %.2761 + %.2763 =l extsw 1 + %.2764 =l mul %.2763, 120 + %.2765 =l add %.2762, %.2764 + %.2766 =l extsw 1 + %.2767 =l mul %.2766, 20 + %.2768 =l add %.2765, %.2767 + %.2769 =l copy %.2768 + %.2770 =l mul 8, 1 + %.2771 =l add %.2769, %.2770 + %.2772 =l copy %.2771 + storel %.2772, %.2759 + %.2773 =l add %.2153, 688 + %.2774 =l extsw 2 + %.2775 =l mul %.2774, 360 + %.2776 =l add %.250, %.2775 + %.2777 =l extsw 1 + %.2778 =l mul %.2777, 120 + %.2779 =l add %.2776, %.2778 + %.2780 =l extsw 1 + %.2781 =l mul %.2780, 20 + %.2782 =l add %.2779, %.2781 + %.2783 =l copy %.2782 + %.2784 =l mul 8, 1 + %.2785 =l add %.2783, %.2784 + %.2786 =l copy %.2785 + storel %.2786, %.2773 + %.2787 =l add %.2153, 696 + %.2788 =l extsw 2 + %.2789 =l mul %.2788, 360 + %.2790 =l add %.250, %.2789 + %.2791 =l extsw 1 + %.2792 =l mul %.2791, 120 + %.2793 =l add %.2790, %.2792 + %.2794 =l extsw 1 + %.2795 =l mul %.2794, 20 + %.2796 =l add %.2793, %.2795 + %.2797 =l copy %.2796 + %.2798 =l mul 8, 1 + %.2799 =l add %.2797, %.2798 + %.2800 =l copy %.2799 + storel %.2800, %.2787 + %.2801 =l add %.2153, 704 + %.2802 =l extsw 2 + %.2803 =l mul %.2802, 360 + %.2804 =l add %.250, %.2803 + %.2805 =l extsw 1 + %.2806 =l mul %.2805, 120 + %.2807 =l add %.2804, %.2806 + %.2808 =l extsw 1 + %.2809 =l mul %.2808, 20 + %.2810 =l add %.2807, %.2809 + %.2811 =l copy %.2810 + %.2812 =l mul 8, 1 + %.2813 =l add %.2811, %.2812 + %.2814 =l copy %.2813 + storel %.2814, %.2801 + %.2815 =l add %.2153, 712 + %.2816 =l extsw 2 + %.2817 =l mul %.2816, 360 + %.2818 =l add %.250, %.2817 + %.2819 =l extsw 1 + %.2820 =l mul %.2819, 120 + %.2821 =l add %.2818, %.2820 + %.2822 =l extsw 1 + %.2823 =l mul %.2822, 20 + %.2824 =l add %.2821, %.2823 + %.2825 =l copy %.2824 + %.2826 =l mul 8, 1 + %.2827 =l add %.2825, %.2826 + %.2828 =l copy %.2827 + storel %.2828, %.2815 + %.2830 =l add %.2829, 0 + %.2831 =w copy 5 + storeb %.2831, %.2830 + %.2833 =l add %.2832, 0 + %.2834 =w copy 188 + storeb 
%.2834, %.2833 + %.2836 =l add %.2835, 0 + %.2837 =l extsw 0 + %.2838 =l sub %.2837, 3 + %.2839 =w copy %.2838 + storew %.2839, %.2836 + storew 0, %.2840 +@for_cond.1244 + %.2843 =w loadsw %.2840 + %.2844 =w csltw %.2843, 2 + jnz %.2844, @for_body.1245, @for_join.1247 +@for_body.1245 + %.2845 =w loadsw %.2840 + %.2846 =l extsw %.2845 + %.2847 =l mul %.2846, 8 + %.2848 =l add %.2145, %.2847 + storel $g_296, %.2848 +@for_cont.1246 + %.2849 =w loadsw %.2840 + %.2850 =w add %.2849, 1 + storew %.2850, %.2840 + jmp @for_cond.1244 +@for_join.1247 + storew 0, %.2840 +@for_cond.1248 + %.2851 =w loadsw %.2840 + %.2852 =w csltw %.2851, 1 + jnz %.2852, @for_body.1249, @for_join.1251 +@for_body.1249 + %.2853 =w copy 3422380986 + %.2854 =w loadsw %.2840 + %.2855 =l extsw %.2854 + %.2856 =l mul %.2855, 4 + %.2857 =l add %.2152, %.2856 + storew %.2853, %.2857 +@for_cont.1250 + %.2858 =w loadsw %.2840 + %.2859 =w add %.2858, 1 + storew %.2859, %.2840 + jmp @for_cond.1248 +@for_join.1251 + %.2860 =w copy 0 + %.2861 =l copy $g_130 + %.2862 =l mul 4, 1 + %.2863 =l add %.2861, %.2862 + %.2864 =l copy %.2863 + storew %.2860, %.2864 +@for_cond.1252 + %.2865 =l copy $g_130 + %.2866 =l mul 4, 1 + %.2867 =l add %.2865, %.2866 + %.2868 =l copy %.2867 + %.2869 =w loaduw %.2868 + %.2870 =w copy 9 + %.2871 =w cugew %.2869, %.2870 + jnz %.2871, @for_body.1253, @for_join.1255 +@for_body.1253 + %.2873 =l add %.2872, 0 + %.2874 =w copy 0 + storeb %.2874, %.2873 + %.2876 =l add %.2875, 0 + %.2877 =l extsw 2 + %.2878 =l mul %.2877, 360 + %.2879 =l add %.250, %.2878 + %.2880 =l extsw 1 + %.2881 =l mul %.2880, 120 + %.2882 =l add %.2879, %.2881 + %.2883 =l extsw 1 + %.2884 =l mul %.2883, 20 + %.2885 =l add %.2882, %.2884 + storel %.2885, %.2876 + %.2887 =l add %.2886, 0 + %.2888 =l extsw 0 + %.2889 =l sub %.2888, 1 + %.2890 =w copy %.2889 + storew %.2890, %.2887 + %.2892 =l add %.2891, 0 + storel $g_201, %.2892 + %.2894 =l add %.2893, 0 + storel %.2130, %.2894 + %.2896 =l add %.2895, 0 + storel %.248, 
%.2896 + %.2898 =l add %.2897, 0 + storel %.2132, %.2898 + %.2899 =l add %.2897, 8 + %.2900 =l extsw 0 + %.2901 =l copy %.2900 + storel %.2901, %.2899 + %.2902 =l add %.2897, 16 + storel %.2132, %.2902 + %.2903 =l add %.2897, 24 + storel %.2132, %.2903 + %.2904 =l add %.2897, 32 + storel %.2132, %.2904 + %.2905 =l add %.2897, 40 + storel %.2132, %.2905 + %.2906 =l add %.2897, 48 + storel %.2132, %.2906 + %.2907 =l add %.2897, 56 + %.2908 =l extsw 0 + %.2909 =l copy %.2908 + storel %.2909, %.2907 + %.2910 =l add %.2897, 64 + storel %.2132, %.2910 + %.2911 =l add %.2897, 72 + storel %.2132, %.2911 + %.2912 =l add %.2897, 80 + storel %.2132, %.2912 + %.2913 =l add %.2897, 88 + storel %.2132, %.2913 + %.2914 =l add %.2897, 96 + storel %.2132, %.2914 + %.2915 =l add %.2897, 104 + %.2916 =l extsw 0 + %.2917 =l copy %.2916 + storel %.2917, %.2915 + %.2918 =l add %.2897, 112 + storel %.2132, %.2918 + %.2919 =l add %.2897, 120 + storel %.2132, %.2919 + %.2920 =l add %.2897, 128 + storel %.2132, %.2920 + %.2921 =l add %.2897, 136 + storel %.2132, %.2921 + %.2922 =l add %.2897, 144 + storel %.2132, %.2922 + %.2923 =l add %.2897, 152 + %.2924 =l extsw 0 + %.2925 =l copy %.2924 + storel %.2925, %.2923 + %.2926 =l add %.2897, 160 + storel %.2132, %.2926 + %.2927 =l add %.2897, 168 + storel %.2132, %.2927 + %.2928 =l add %.2897, 176 + storel %.2132, %.2928 + %.2929 =l add %.2897, 184 + storel %.2132, %.2929 + %.2930 =l add %.2897, 192 + storel %.2132, %.2930 + %.2931 =l add %.2897, 200 + %.2932 =l extsw 0 + %.2933 =l copy %.2932 + storel %.2933, %.2931 + %.2934 =l add %.2897, 208 + storel %.2132, %.2934 + %.2936 =l add %.2935, 0 + %.2937 =w copy 8 + storeb %.2937, %.2936 + %.2938 =l add %.2935, 1 + %.2939 =w copy 1 + storeb %.2939, %.2938 + %.2940 =l add %.2935, 2 + %.2941 =w copy 1 + storeb %.2941, %.2940 + %.2942 =l add %.2935, 3 + %.2943 =w copy 8 + storeb %.2943, %.2942 + %.2944 =l add %.2935, 4 + %.2945 =w copy 176 + storeb %.2945, %.2944 + %.2946 =l add %.2935, 5 + %.2947 
=w copy 178 + storeb %.2947, %.2946 + %.2948 =l add %.2935, 6 + %.2949 =w copy 75 + storeb %.2949, %.2948 + %.2950 =l add %.2935, 7 + %.2951 =w copy 15 + storeb %.2951, %.2950 + %.2952 =l add %.2935, 8 + %.2953 =w copy 1 + storeb %.2953, %.2952 + %.2954 =l add %.2935, 9 + %.2955 =w copy 1 + storeb %.2955, %.2954 + %.2956 =l add %.2935, 10 + %.2957 =w copy 140 + storeb %.2957, %.2956 + %.2958 =l add %.2935, 11 + %.2959 =w copy 140 + storeb %.2959, %.2958 + %.2960 =l add %.2935, 12 + %.2961 =w copy 1 + storeb %.2961, %.2960 + %.2962 =l add %.2935, 13 + %.2963 =w copy 1 + storeb %.2963, %.2962 + %.2964 =l add %.2935, 14 + %.2965 =w copy 140 + storeb %.2965, %.2964 + %.2966 =l add %.2935, 15 + %.2967 =w copy 140 + storeb %.2967, %.2966 + %.2968 =l add %.2935, 16 + %.2969 =w copy 49 + storeb %.2969, %.2968 + %.2970 =l add %.2935, 17 + %.2971 =w copy 49 + storeb %.2971, %.2970 + %.2972 =l add %.2935, 18 + %.2973 =w copy 176 + storeb %.2973, %.2972 + %.2974 =l add %.2935, 19 + %.2975 =l extsw 0 + %.2976 =l sub %.2975, 1 + %.2977 =w copy %.2976 + storeb %.2977, %.2974 + %.2978 =l add %.2935, 20 + %.2979 =w copy 8 + storeb %.2979, %.2978 + %.2980 =l add %.2935, 21 + %.2981 =l extsw 0 + %.2982 =l sub %.2981, 8 + %.2983 =w copy %.2982 + storeb %.2983, %.2980 + %.2984 =l add %.2935, 22 + %.2985 =w copy 75 + storeb %.2985, %.2984 + %.2986 =l add %.2935, 23 + %.2987 =w copy 49 + storeb %.2987, %.2986 + %.2988 =l add %.2935, 24 + %.2989 =w copy 1 + storeb %.2989, %.2988 + %.2990 =l add %.2935, 25 + %.2991 =w copy 178 + storeb %.2991, %.2990 + %.2992 =l add %.2935, 26 + %.2993 =w copy 8 + storeb %.2993, %.2992 + %.2994 =l add %.2935, 27 + %.2995 =w copy 1 + storeb %.2995, %.2994 + %.2996 =l add %.2935, 28 + %.2997 =w copy 75 + storeb %.2997, %.2996 + %.2998 =l add %.2935, 29 + %.2999 =w copy 8 + storeb %.2999, %.2998 + %.3000 =l add %.2935, 30 + %.3001 =w copy 140 + storeb %.3001, %.3000 + %.3002 =l add %.2935, 31 + %.3003 =w copy 49 + storeb %.3003, %.3002 + %.3004 =l add %.2935, 
32 + %.3005 =w copy 178 + storeb %.3005, %.3004 + %.3006 =l add %.2935, 33 + %.3007 =w copy 242 + storeb %.3007, %.3006 + %.3008 =l add %.2935, 34 + %.3009 =w copy 1 + storeb %.3009, %.3008 + %.3010 =l add %.2935, 35 + %.3011 =l extsw 0 + %.3012 =l sub %.3011, 1 + %.3013 =w copy %.3012 + storeb %.3013, %.3010 + %.3014 =l add %.2935, 36 + %.3015 =w copy 1 + storeb %.3015, %.3014 + %.3016 =l add %.2935, 37 + %.3017 =w copy 242 + storeb %.3017, %.3016 + %.3018 =l add %.2935, 38 + %.3019 =w copy 178 + storeb %.3019, %.3018 + %.3020 =l add %.2935, 39 + %.3021 =w copy 140 + storeb %.3021, %.3020 + %.3022 =l add %.2935, 40 + %.3023 =w copy 1 + storeb %.3023, %.3022 + %.3024 =l add %.2935, 41 + %.3025 =l extsw 0 + %.3026 =l sub %.3025, 1 + %.3027 =w copy %.3026 + storeb %.3027, %.3024 + %.3028 =l add %.2935, 42 + %.3029 =w copy 121 + storeb %.3029, %.3028 + %.3030 =l add %.2935, 43 + %.3031 =w copy 140 + storeb %.3031, %.3030 + %.3032 =l add %.2935, 44 + %.3033 =l extsw 0 + %.3034 =l sub %.3033, 1 + %.3035 =w copy %.3034 + storeb %.3035, %.3032 + %.3036 =l add %.2935, 45 + %.3037 =w copy 1 + storeb %.3037, %.3036 + %.3038 =l add %.2935, 46 + %.3039 =w copy 75 + storeb %.3039, %.3038 + %.3040 =l add %.2935, 47 + %.3041 =w copy 1 + storeb %.3041, %.3040 + %.3042 =l add %.2935, 48 + %.3043 =w copy 0 + storeb %.3043, %.3042 + %.3044 =l add %.2935, 49 + %.3045 =w copy 1 + storeb %.3045, %.3044 + %.3046 =l add %.2935, 50 + %.3047 =w copy 1 + storeb %.3047, %.3046 + %.3048 =l add %.2935, 51 + %.3049 =w copy 242 + storeb %.3049, %.3048 + %.3050 =l add %.2935, 52 + %.3051 =l extsw 0 + %.3052 =l sub %.3051, 1 + %.3053 =w copy %.3052 + storeb %.3053, %.3050 + %.3054 =l add %.2935, 53 + %.3055 =w copy 8 + storeb %.3055, %.3054 + %.3056 =l add %.2935, 54 + %.3057 =w copy 8 + storeb %.3057, %.3056 + %.3058 =l add %.2935, 55 + %.3059 =l extsw 0 + %.3060 =l sub %.3059, 1 + %.3061 =w copy %.3060 + storeb %.3061, %.3058 + %.3062 =l add %.2935, 56 + %.3063 =w copy 1 + storeb %.3063, %.3062 + 
%.3064 =l add %.2935, 57 + %.3065 =w copy 140 + storeb %.3065, %.3064 + %.3066 =l add %.2935, 58 + %.3067 =w copy 140 + storeb %.3067, %.3066 + %.3068 =l add %.2935, 59 + %.3069 =w copy 1 + storeb %.3069, %.3068 + %.3070 =l add %.2935, 60 + %.3071 =w copy 1 + storeb %.3071, %.3070 + %.3072 =l add %.2935, 61 + %.3073 =w copy 0 + storeb %.3073, %.3072 + %.3074 =l add %.2935, 62 + %.3075 =w copy 49 + storeb %.3075, %.3074 + %.3076 =l add %.2935, 63 + %.3077 =w copy 242 + storeb %.3077, %.3076 + %.3078 =l add %.2935, 64 + %.3079 =w copy 178 + storeb %.3079, %.3078 + %.3080 =l add %.2935, 65 + %.3081 =w copy 49 + storeb %.3081, %.3080 + %.3082 =l add %.2935, 66 + %.3083 =w copy 0 + storeb %.3083, %.3082 + %.3084 =l add %.2935, 67 + %.3085 =w copy 140 + storeb %.3085, %.3084 + %.3086 =l add %.2935, 68 + %.3087 =w copy 75 + storeb %.3087, %.3086 + %.3088 =l add %.2935, 69 + %.3089 =w copy 176 + storeb %.3089, %.3088 + %.3090 =l add %.2935, 70 + %.3091 =w copy 140 + storeb %.3091, %.3090 + %.3092 =l add %.2935, 71 + %.3093 =w copy 178 + storeb %.3093, %.3092 + %.3094 =l add %.2935, 72 + %.3095 =w copy 1 + storeb %.3095, %.3094 + %.3096 =l add %.2935, 73 + %.3097 =w copy 49 + storeb %.3097, %.3096 + %.3098 =l add %.2935, 74 + %.3099 =w copy 1 + storeb %.3099, %.3098 + %.3100 =l add %.2935, 75 + %.3101 =w copy 0 + storeb %.3101, %.3100 + %.3102 =l add %.2935, 76 + %.3103 =w copy 8 + storeb %.3103, %.3102 + %.3104 =l add %.2935, 77 + %.3105 =w copy 0 + storeb %.3105, %.3104 + %.3106 =l add %.2935, 78 + %.3107 =w copy 1 + storeb %.3107, %.3106 + %.3108 =l add %.2935, 79 + %.3109 =w copy 49 + storeb %.3109, %.3108 + %.3110 =l add %.2935, 80 + %.3111 =w copy 49 + storeb %.3111, %.3110 + %.3112 =l add %.2935, 81 + %.3113 =w copy 140 + storeb %.3113, %.3112 + %.3114 =l add %.2935, 82 + %.3115 =w copy 8 + storeb %.3115, %.3114 + %.3116 =l add %.2935, 83 + %.3117 =w copy 75 + storeb %.3117, %.3116 + %.3118 =l add %.2935, 84 + %.3119 =w copy 1 + storeb %.3119, %.3118 + %.3120 =l add 
%.2935, 85 + %.3121 =w copy 8 + storeb %.3121, %.3120 + %.3122 =l add %.2935, 86 + %.3123 =w copy 178 + storeb %.3123, %.3122 + %.3124 =l add %.2935, 87 + %.3125 =w copy 1 + storeb %.3125, %.3124 + %.3126 =l add %.2935, 88 + %.3127 =w copy 178 + storeb %.3127, %.3126 + %.3128 =l add %.2935, 89 + %.3129 =w copy 1 + storeb %.3129, %.3128 + %.3130 =l add %.2935, 90 + %.3131 =l extsw 0 + %.3132 =l sub %.3131, 1 + %.3133 =w copy %.3132 + storeb %.3133, %.3130 + %.3134 =l add %.2935, 91 + %.3135 =l extsw 0 + %.3136 =l sub %.3135, 1 + %.3137 =w copy %.3136 + storeb %.3137, %.3134 + %.3138 =l add %.2935, 92 + %.3139 =w copy 49 + storeb %.3139, %.3138 + %.3140 =l add %.2935, 93 + %.3141 =w copy 1 + storeb %.3141, %.3140 + %.3142 =l add %.2935, 94 + %.3143 =w copy 178 + storeb %.3143, %.3142 + %.3144 =l add %.2935, 95 + %.3145 =w copy 178 + storeb %.3145, %.3144 + %.3146 =l add %.2935, 96 + %.3147 =w copy 242 + storeb %.3147, %.3146 + %.3148 =l add %.2935, 97 + %.3149 =l extsw 0 + %.3150 =l sub %.3149, 1 + %.3151 =w copy %.3150 + storeb %.3151, %.3148 + %.3152 =l add %.2935, 98 + %.3153 =w copy 8 + storeb %.3153, %.3152 + %.3154 =l add %.2935, 99 + %.3155 =w copy 8 + storeb %.3155, %.3154 + %.3156 =l add %.2935, 100 + %.3157 =l extsw 0 + %.3158 =l sub %.3157, 1 + %.3159 =w copy %.3158 + storeb %.3159, %.3156 + %.3160 =l add %.2935, 101 + %.3161 =w copy 242 + storeb %.3161, %.3160 + %.3162 =l add %.2935, 102 + %.3163 =w copy 1 + storeb %.3163, %.3162 + %.3164 =l add %.2935, 103 + %.3165 =w copy 1 + storeb %.3165, %.3164 + %.3166 =l add %.2935, 104 + %.3167 =l extsw 0 + %.3168 =l sub %.3167, 1 + %.3169 =w copy %.3168 + storeb %.3169, %.3166 + %.3170 =l add %.2935, 105 + %.3171 =w copy 242 + storeb %.3171, %.3170 + %.3172 =l add %.2935, 106 + %.3173 =w copy 1 + storeb %.3173, %.3172 + %.3174 =l add %.2935, 107 + %.3175 =w copy 1 + storeb %.3175, %.3174 + %.3176 =l add %.2935, 108 + %.3177 =w copy 0 + storeb %.3177, %.3176 + %.3178 =l add %.2935, 109 + %.3179 =w copy 8 + storeb 
%.3179, %.3178 + %.3180 =l add %.2935, 110 + %.3181 =w copy 140 + storeb %.3181, %.3180 + %.3182 =l add %.2935, 111 + %.3183 =w copy 0 + storeb %.3183, %.3182 + %.3184 =l add %.2935, 112 + %.3185 =w copy 1 + storeb %.3185, %.3184 + %.3186 =l add %.2935, 113 + %.3187 =w copy 178 + storeb %.3187, %.3186 + %.3188 =l add %.2935, 114 + %.3189 =w copy 0 + storeb %.3189, %.3188 + %.3190 =l add %.2935, 115 + %.3191 =w copy 1 + storeb %.3191, %.3190 + %.3192 =l add %.2935, 116 + %.3193 =w copy 49 + storeb %.3193, %.3192 + %.3194 =l add %.2935, 117 + %.3195 =l extsw 0 + %.3196 =l sub %.3195, 8 + %.3197 =w copy %.3196 + storeb %.3197, %.3194 + %.3198 =l add %.2935, 118 + %.3199 =w copy 49 + storeb %.3199, %.3198 + %.3200 =l add %.2935, 119 + %.3201 =w copy 1 + storeb %.3201, %.3200 + %.3202 =l add %.2935, 120 + %.3203 =w copy 140 + storeb %.3203, %.3202 + %.3204 =l add %.2935, 121 + %.3205 =w copy 49 + storeb %.3205, %.3204 + %.3206 =l add %.2935, 122 + %.3207 =w copy 140 + storeb %.3207, %.3206 + %.3208 =l add %.2935, 123 + %.3209 =w copy 8 + storeb %.3209, %.3208 + %.3210 =l add %.2935, 124 + %.3211 =w copy 75 + storeb %.3211, %.3210 + %.3212 =l add %.2935, 125 + %.3213 =w copy 1 + storeb %.3213, %.3212 + %.3214 =l add %.2935, 126 + %.3215 =w copy 8 + storeb %.3215, %.3214 + %.3216 =l add %.2935, 127 + %.3217 =w copy 178 + storeb %.3217, %.3216 + %.3218 =l add %.2935, 128 + %.3219 =w copy 49 + storeb %.3219, %.3218 + %.3220 =l add %.2935, 129 + %.3221 =w copy 1 + storeb %.3221, %.3220 + %.3222 =l add %.2935, 130 + %.3223 =w copy 1 + storeb %.3223, %.3222 + %.3224 =l add %.2935, 131 + %.3225 =l extsw 0 + %.3226 =l sub %.3225, 1 + %.3227 =w copy %.3226 + storeb %.3227, %.3224 + %.3228 =l add %.2935, 132 + %.3229 =w copy 119 + storeb %.3229, %.3228 + %.3230 =l add %.2935, 133 + %.3231 =w copy 0 + storeb %.3231, %.3230 + %.3232 =l add %.2935, 134 + %.3233 =w copy 75 + storeb %.3233, %.3232 + %.3234 =l add %.2935, 135 + %.3235 =w copy 1 + storeb %.3235, %.3234 + %.3236 =l add 
%.2935, 136 + %.3237 =w copy 49 + storeb %.3237, %.3236 + %.3238 =l add %.2935, 137 + %.3239 =w copy 178 + storeb %.3239, %.3238 + %.3240 =l add %.2935, 138 + %.3241 =w copy 121 + storeb %.3241, %.3240 + %.3242 =l add %.2935, 139 + %.3243 =w copy 75 + storeb %.3243, %.3242 + %.3244 =l add %.2935, 140 + %.3245 =w copy 75 + storeb %.3245, %.3244 + %.3246 =l add %.2935, 141 + %.3247 =w copy 121 + storeb %.3247, %.3246 + %.3248 =l add %.2935, 142 + %.3249 =w copy 178 + storeb %.3249, %.3248 + %.3250 =l add %.2935, 143 + %.3251 =w copy 49 + storeb %.3251, %.3250 + %.3252 =l add %.2935, 144 + %.3253 =w copy 140 + storeb %.3253, %.3252 + %.3254 =l add %.2935, 145 + %.3255 =w copy 1 + storeb %.3255, %.3254 + %.3256 =l add %.2935, 146 + %.3257 =w copy 1 + storeb %.3257, %.3256 + %.3258 =l add %.2935, 147 + %.3259 =w copy 0 + storeb %.3259, %.3258 + %.3260 =l add %.2935, 148 + %.3261 =w copy 49 + storeb %.3261, %.3260 + %.3262 =l add %.2935, 149 + %.3263 =w copy 242 + storeb %.3263, %.3262 + %.3264 =l add %.2935, 150 + %.3265 =w copy 140 + storeb %.3265, %.3264 + %.3266 =l add %.2935, 151 + %.3267 =w copy 178 + storeb %.3267, %.3266 + %.3268 =l add %.2935, 152 + %.3269 =w copy 1 + storeb %.3269, %.3268 + %.3270 =l add %.2935, 153 + %.3271 =w copy 0 + storeb %.3271, %.3270 + %.3272 =l add %.2935, 154 + %.3273 =w copy 8 + storeb %.3273, %.3272 + %.3274 =l add %.2935, 155 + %.3275 =w copy 140 + storeb %.3275, %.3274 + %.3276 =l add %.2935, 156 + %.3277 =w copy 0 + storeb %.3277, %.3276 + %.3278 =l add %.2935, 157 + %.3279 =w copy 242 + storeb %.3279, %.3278 + %.3280 =l add %.2935, 158 + %.3281 =w copy 75 + storeb %.3281, %.3280 + %.3282 =l add %.2935, 159 + %.3283 =w copy 242 + storeb %.3283, %.3282 + %.3284 =l add %.2935, 160 + %.3285 =l extsw 0 + %.3286 =l sub %.3285, 1 + %.3287 =w copy %.3286 + storeb %.3287, %.3284 + %.3288 =l add %.2935, 161 + %.3289 =w copy 1 + storeb %.3289, %.3288 + %.3290 =l add %.2935, 162 + %.3291 =w copy 176 + storeb %.3291, %.3290 + %.3292 =l add 
%.2935, 163 + %.3293 =w copy 1 + storeb %.3293, %.3292 + %.3294 =l add %.2935, 164 + %.3295 =l extsw 0 + %.3296 =l sub %.3295, 1 + %.3297 =w copy %.3296 + storeb %.3297, %.3294 + %.3298 =l add %.2935, 165 + %.3299 =w copy 121 + storeb %.3299, %.3298 + %.3300 =l add %.2935, 166 + %.3301 =w copy 140 + storeb %.3301, %.3300 + %.3302 =l add %.2935, 167 + %.3303 =l extsw 0 + %.3304 =l sub %.3303, 1 + %.3305 =w copy %.3304 + storeb %.3305, %.3302 + %.3306 =l add %.2935, 168 + %.3307 =w copy 242 + storeb %.3307, %.3306 + %.3308 =l add %.2935, 169 + %.3309 =w copy 178 + storeb %.3309, %.3308 + %.3310 =l add %.2935, 170 + %.3311 =w copy 140 + storeb %.3311, %.3310 + %.3312 =l add %.2935, 171 + %.3313 =w copy 242 + storeb %.3313, %.3312 + %.3314 =l add %.2935, 172 + %.3315 =w copy 49 + storeb %.3315, %.3314 + %.3316 =l add %.2935, 173 + %.3317 =w copy 0 + storeb %.3317, %.3316 + %.3318 =l add %.2935, 174 + %.3319 =w copy 1 + storeb %.3319, %.3318 + %.3320 =l add %.2935, 175 + %.3321 =w copy 1 + storeb %.3321, %.3320 + %.3322 =l add %.2935, 176 + %.3323 =w copy 178 + storeb %.3323, %.3322 + %.3324 =l add %.2935, 177 + %.3325 =w copy 1 + storeb %.3325, %.3324 + %.3326 =l add %.2935, 178 + %.3327 =w copy 140 + storeb %.3327, %.3326 + %.3328 =l add %.2935, 179 + %.3329 =w copy 140 + storeb %.3329, %.3328 + %.3330 =l add %.2935, 180 + %.3331 =w copy 1 + storeb %.3331, %.3330 + %.3332 =l add %.2935, 181 + %.3333 =w copy 1 + storeb %.3333, %.3332 + %.3334 =l add %.2935, 182 + %.3335 =w copy 140 + storeb %.3335, %.3334 + %.3336 =l add %.2935, 183 + %.3337 =w copy 140 + storeb %.3337, %.3336 + %.3338 =l add %.2935, 184 + %.3339 =w copy 49 + storeb %.3339, %.3338 + %.3340 =l add %.2935, 185 + %.3341 =w copy 49 + storeb %.3341, %.3340 + %.3342 =l add %.2935, 186 + %.3343 =w copy 176 + storeb %.3343, %.3342 + %.3344 =l add %.2935, 187 + %.3345 =l extsw 0 + %.3346 =l sub %.3345, 1 + %.3347 =w copy %.3346 + storeb %.3347, %.3344 + %.3348 =l add %.2935, 188 + %.3349 =w copy 8 + storeb 
%.3349, %.3348 + %.3350 =l add %.2935, 189 + %.3351 =l extsw 0 + %.3352 =l sub %.3351, 8 + %.3353 =w copy %.3352 + storeb %.3353, %.3350 + %.3354 =l add %.2935, 190 + %.3355 =w copy 75 + storeb %.3355, %.3354 + %.3356 =l add %.2935, 191 + %.3357 =w copy 49 + storeb %.3357, %.3356 + %.3358 =l add %.2935, 192 + %.3359 =w copy 1 + storeb %.3359, %.3358 + %.3360 =l add %.2935, 193 + %.3361 =w copy 178 + storeb %.3361, %.3360 + %.3362 =l add %.2935, 194 + %.3363 =w copy 8 + storeb %.3363, %.3362 + %.3364 =l add %.2935, 195 + %.3365 =w copy 1 + storeb %.3365, %.3364 + %.3366 =l add %.2935, 196 + %.3367 =w copy 75 + storeb %.3367, %.3366 + %.3368 =l add %.2935, 197 + %.3369 =w copy 8 + storeb %.3369, %.3368 + %.3370 =l add %.2935, 198 + %.3371 =w copy 140 + storeb %.3371, %.3370 + %.3372 =l add %.2935, 199 + %.3373 =w copy 119 + storeb %.3373, %.3372 + %.3374 =l add %.2935, 200 + %.3375 =w copy 0 + storeb %.3375, %.3374 + %.3376 =l add %.2935, 201 + %.3377 =w copy 0 + storeb %.3377, %.3376 + %.3378 =l add %.2935, 202 + %.3379 =w copy 15 + storeb %.3379, %.3378 + %.3380 =l add %.2935, 203 + %.3381 =w copy 8 + storeb %.3381, %.3380 + %.3382 =l add %.2935, 204 + %.3383 =w copy 15 + storeb %.3383, %.3382 + %.3384 =l add %.2935, 205 + %.3385 =w copy 0 + storeb %.3385, %.3384 + %.3386 =l add %.2935, 206 + %.3387 =w copy 0 + storeb %.3387, %.3386 + %.3388 =l add %.2935, 207 + %.3389 =l extsw 0 + %.3390 =l sub %.3389, 8 + %.3391 =w copy %.3390 + storeb %.3391, %.3388 + %.3392 =l add %.2935, 208 + %.3393 =w copy 140 + storeb %.3393, %.3392 + %.3394 =l add %.2935, 209 + %.3395 =w copy 8 + storeb %.3395, %.3394 + %.3396 =l add %.2935, 210 + %.3397 =w copy 75 + storeb %.3397, %.3396 + %.3398 =l add %.2935, 211 + %.3399 =w copy 1 + storeb %.3399, %.3398 + %.3400 =l add %.2935, 212 + %.3401 =w copy 8 + storeb %.3401, %.3400 + %.3402 =l add %.2935, 213 + %.3403 =w copy 178 + storeb %.3403, %.3402 + %.3404 =l add %.2935, 214 + %.3405 =w copy 1 + storeb %.3405, %.3404 + %.3406 =l add 
%.2935, 215 + %.3407 =w copy 140 + storeb %.3407, %.3406 + %.3408 =l add %.2935, 216 + %.3409 =w copy 121 + storeb %.3409, %.3408 + %.3410 =l add %.2935, 217 + %.3411 =w copy 140 + storeb %.3411, %.3410 + %.3412 =l add %.2935, 218 + %.3413 =l extsw 0 + %.3414 =l sub %.3413, 1 + %.3415 =w copy %.3414 + storeb %.3415, %.3412 + %.3416 =l add %.2935, 219 + %.3417 =w copy 0 + storeb %.3417, %.3416 + %.3418 =l add %.2935, 220 + %.3419 =w copy 8 + storeb %.3419, %.3418 + %.3420 =l add %.2935, 221 + %.3421 =w copy 176 + storeb %.3421, %.3420 + %.3422 =l add %.2935, 222 + %.3423 =w copy 176 + storeb %.3423, %.3422 + %.3424 =l add %.2935, 223 + %.3425 =w copy 8 + storeb %.3425, %.3424 + %.3426 =l add %.2935, 224 + %.3427 =w copy 140 + storeb %.3427, %.3426 + %.3428 =l add %.2935, 225 + %.3429 =l extsw 0 + %.3430 =l sub %.3429, 8 + %.3431 =w copy %.3430 + storeb %.3431, %.3428 + %.3432 =l add %.2935, 226 + %.3433 =l extsw 0 + %.3434 =l sub %.3433, 8 + %.3435 =w copy %.3434 + storeb %.3435, %.3432 + %.3436 =l add %.2935, 227 + %.3437 =w copy 140 + storeb %.3437, %.3436 + %.3438 =l add %.2935, 228 + %.3439 =w copy 15 + storeb %.3439, %.3438 + %.3440 =l add %.2935, 229 + %.3441 =w copy 121 + storeb %.3441, %.3440 + %.3442 =l add %.2935, 230 + %.3443 =w copy 119 + storeb %.3443, %.3442 + %.3444 =l add %.2935, 231 + %.3445 =w copy 0 + storeb %.3445, %.3444 + %.3446 =l add %.2935, 232 + %.3447 =w copy 0 + storeb %.3447, %.3446 + %.3448 =l add %.2935, 233 + %.3449 =w copy 119 + storeb %.3449, %.3448 + %.3450 =l add %.2935, 234 + %.3451 =l extsw 0 + %.3452 =l sub %.3451, 1 + %.3453 =w copy %.3452 + storeb %.3453, %.3450 + %.3454 =l add %.2935, 235 + %.3455 =w copy 1 + storeb %.3455, %.3454 + %.3456 =l add %.2935, 236 + %.3457 =w copy 1 + storeb %.3457, %.3456 + %.3458 =l add %.2935, 237 + %.3459 =w copy 49 + storeb %.3459, %.3458 + %.3460 =l add %.2935, 238 + %.3461 =w copy 1 + storeb %.3461, %.3460 + %.3462 =l add %.2935, 239 + %.3463 =w copy 0 + storeb %.3463, %.3462 + %.3465 =l 
add %.3464, 0 + %.3466 =l extsw 0 + %.3467 =l copy %.3466 + storel %.3467, %.3465 + %.3469 =l add %.3468, 0 + storel $g_80, %.3469 + %.3473 =l loadl $g_38 + %.3474 =l loadl %.3473 + %.3475 =w loadsw %.3474 + storew %.3475, %.2122 + %.3476 =w loaduw %.4 + %.3477 =w cnew %.3476, 0 + jnz %.3477, @logic_right.1256, @logic_join.1257 +@logic_right.1256 + %.3478 =w loadub %.6 + %.3479 =w extub %.3478 + %.3480 =l extsw 2 + %.3481 =l mul %.3480, 1 + %.3482 =l add $g_132, %.3481 + %.3483 =w loadsb %.3482 + %.3484 =w extsb %.3483 + %.3485 =w csgew %.3479, %.3484 + %.3486 =w cnew %.3485, 0 +@logic_join.1257 + %.3487 =w phi @for_body.1253 %.3477, @logic_right.1256 %.3486 + %.3488 =w copy %.3487 + %.3489 =w loadsw %.2122 + %.3490 =l extsw %.3489 + %.3491 =w cugel %.3490, 1 + %.3492 =w copy %.3491 + %.3493 =w call $safe_add_func_uint32_t_u_u(w %.3488, w %.3492) + %.3494 =w copy %.3493 + storew %.3494, %.2125 + %.3495 =w cnew %.3494, 0 + jnz %.3495, @if_true.1258, @if_false.1259 +@if_true.1258 + %.3497 =l add %.3496, 0 + storel %.248, %.3497 + %.3499 =l add %.3498, 0 + %.3500 =w copy 0 + storew %.3500, %.3499 + %.3501 =l add %.3498, 4 + %.3502 =w copy 535778462 + storew %.3502, %.3501 + %.3503 =l add %.3498, 8 + %.3504 =l extsw 0 + %.3505 =l sub %.3504, 1 + %.3506 =w copy %.3505 + storew %.3506, %.3503 + %.3507 =l add %.3498, 12 + %.3508 =w copy 0 + storew %.3508, %.3507 + %.3509 =l add %.3498, 16 + %.3510 =l extsw 0 + %.3511 =l sub %.3510, 10 + %.3512 =w copy %.3511 + storew %.3512, %.3509 + %.3513 =l add %.3498, 20 + %.3514 =w copy 4237820494 + storew %.3514, %.3513 + %.3515 =l add %.3498, 24 + %.3516 =w copy 0 + storew %.3516, %.3515 + %.3517 =l add %.3498, 28 + %.3518 =w copy 4237820494 + storew %.3518, %.3517 + %.3519 =l add %.3498, 32 + %.3520 =l extsw 0 + %.3521 =l sub %.3520, 10 + %.3522 =w copy %.3521 + storew %.3522, %.3519 + %.3523 =l add %.3498, 36 + %.3524 =w copy 0 + storew %.3524, %.3523 + %.3525 =l add %.3498, 40 + %.3526 =l extsw 0 + %.3527 =l sub %.3526, 1 + 
%.3528 =w copy %.3527 + storew %.3528, %.3525 + %.3529 =l add %.3498, 44 + %.3530 =w copy 535778462 + storew %.3530, %.3529 + %.3531 =l add %.3498, 48 + %.3532 =w copy 0 + storew %.3532, %.3531 + %.3533 =l add %.3498, 52 + %.3534 =w copy 919506955 + storew %.3534, %.3533 + %.3535 =l add %.3498, 56 + %.3536 =w copy 430035244 + storew %.3536, %.3535 + %.3537 =l add %.3498, 60 + %.3538 =w copy 0 + storew %.3538, %.3537 + %.3539 =l add %.3498, 64 + %.3540 =w copy 430035244 + storew %.3540, %.3539 + %.3541 =l add %.3498, 68 + %.3542 =w copy 919506955 + storew %.3542, %.3541 + %.3543 =l add %.3498, 72 + %.3544 =w copy 0 + storew %.3544, %.3543 + %.3545 =l add %.3498, 76 + %.3546 =w copy 535778462 + storew %.3546, %.3545 + %.3547 =l add %.3498, 80 + %.3548 =l extsw 0 + %.3549 =l sub %.3548, 1 + %.3550 =w copy %.3549 + storew %.3550, %.3547 + %.3551 =l add %.3498, 84 + %.3552 =w copy 0 + storew %.3552, %.3551 + %.3553 =l add %.3498, 88 + %.3554 =l extsw 0 + %.3555 =l sub %.3554, 10 + %.3556 =w copy %.3555 + storew %.3556, %.3553 + %.3557 =l add %.3498, 92 + %.3558 =w copy 4237820494 + storew %.3558, %.3557 + %.3559 =l add %.3498, 96 + %.3560 =w copy 0 + storew %.3560, %.3559 + %.3561 =l add %.3498, 100 + %.3562 =w copy 4237820494 + storew %.3562, %.3561 + %.3563 =l add %.3498, 104 + %.3564 =l extsw 0 + %.3565 =l sub %.3564, 10 + %.3566 =w copy %.3565 + storew %.3566, %.3563 + %.3567 =l add %.3498, 108 + %.3568 =w copy 0 + storew %.3568, %.3567 + %.3569 =l add %.3498, 112 + %.3570 =l extsw 0 + %.3571 =l sub %.3570, 1 + %.3572 =w copy %.3571 + storew %.3572, %.3569 + %.3573 =l add %.3498, 116 + %.3574 =w copy 535778462 + storew %.3574, %.3573 + %.3575 =l add %.3498, 120 + %.3576 =w copy 0 + storew %.3576, %.3575 + %.3577 =l add %.3498, 124 + %.3578 =w copy 919506955 + storew %.3578, %.3577 + %.3579 =l add %.3498, 128 + %.3580 =w copy 430035244 + storew %.3580, %.3579 + %.3581 =l add %.3498, 132 + %.3582 =w copy 0 + storew %.3582, %.3581 + %.3583 =l add %.3498, 136 + %.3584 
=w copy 430035244 + storew %.3584, %.3583 + %.3585 =l add %.3498, 140 + %.3586 =w copy 919506955 + storew %.3586, %.3585 + %.3587 =l add %.3498, 144 + %.3588 =w copy 0 + storew %.3588, %.3587 + %.3589 =l add %.3498, 148 + %.3590 =w copy 535778462 + storew %.3590, %.3589 + %.3591 =l add %.3498, 152 + %.3592 =l extsw 0 + %.3593 =l sub %.3592, 1 + %.3594 =w copy %.3593 + storew %.3594, %.3591 + %.3595 =l add %.3498, 156 + %.3596 =w copy 0 + storew %.3596, %.3595 + %.3597 =l add %.3498, 160 + %.3598 =l extsw 0 + %.3599 =l sub %.3598, 10 + %.3600 =w copy %.3599 + storew %.3600, %.3597 + %.3601 =l add %.3498, 164 + %.3602 =w copy 4237820494 + storew %.3602, %.3601 + %.3603 =l add %.3498, 168 + %.3604 =w copy 0 + storew %.3604, %.3603 + %.3605 =l add %.3498, 172 + %.3606 =w copy 4237820494 + storew %.3606, %.3605 + %.3607 =l add %.3498, 176 + %.3608 =l extsw 0 + %.3609 =l sub %.3608, 10 + %.3610 =w copy %.3609 + storew %.3610, %.3607 + %.3611 =l add %.3498, 180 + %.3612 =w copy 0 + storew %.3612, %.3611 + %.3613 =l add %.3498, 184 + %.3614 =l extsw 0 + %.3615 =l sub %.3614, 1 + %.3616 =w copy %.3615 + storew %.3616, %.3613 + %.3617 =l add %.3498, 188 + %.3618 =w copy 535778462 + storew %.3618, %.3617 + %.3619 =l add %.3498, 192 + %.3620 =w copy 0 + storew %.3620, %.3619 + %.3621 =l add %.3498, 196 + %.3622 =w copy 919506955 + storew %.3622, %.3621 + %.3623 =l add %.3498, 200 + %.3624 =w copy 430035244 + storew %.3624, %.3623 + %.3625 =l add %.3498, 204 + %.3626 =w copy 0 + storew %.3626, %.3625 + %.3627 =l add %.3498, 208 + %.3628 =w copy 430035244 + storew %.3628, %.3627 + %.3629 =l add %.3498, 212 + %.3630 =w copy 919506955 + storew %.3630, %.3629 + %.3631 =l add %.3498, 216 + %.3632 =w copy 0 + storew %.3632, %.3631 + %.3633 =l add %.3498, 220 + %.3634 =w copy 535778462 + storew %.3634, %.3633 + %.3635 =l add %.3498, 224 + %.3636 =l extsw 0 + %.3637 =l sub %.3636, 1 + %.3638 =w copy %.3637 + storew %.3638, %.3635 + %.3639 =l add %.3498, 228 + %.3640 =w copy 0 + storew 
%.3640, %.3639 + %.3641 =l add %.3498, 232 + %.3642 =l extsw 0 + %.3643 =l sub %.3642, 10 + %.3644 =w copy %.3643 + storew %.3644, %.3641 + %.3645 =l add %.3498, 236 + %.3646 =w copy 4237820494 + storew %.3646, %.3645 + %.3647 =l add %.3498, 240 + %.3648 =w copy 0 + storew %.3648, %.3647 + %.3649 =l add %.3498, 244 + %.3650 =w copy 4237820494 + storew %.3650, %.3649 + %.3651 =l add %.3498, 248 + %.3652 =l extsw 0 + %.3653 =l sub %.3652, 10 + %.3654 =w copy %.3653 + storew %.3654, %.3651 + %.3655 =l add %.3498, 252 + %.3656 =w copy 0 + storew %.3656, %.3655 + %.3657 =l add %.3498, 256 + %.3658 =l extsw 0 + %.3659 =l sub %.3658, 1 + %.3660 =w copy %.3659 + storew %.3660, %.3657 + %.3661 =l add %.3498, 260 + %.3662 =w copy 535778462 + storew %.3662, %.3661 + %.3663 =l add %.3498, 264 + %.3664 =w copy 0 + storew %.3664, %.3663 + %.3665 =l add %.3498, 268 + %.3666 =w copy 919506955 + storew %.3666, %.3665 + %.3667 =l add %.3498, 272 + %.3668 =w copy 430035244 + storew %.3668, %.3667 + %.3669 =l add %.3498, 276 + %.3670 =w copy 0 + storew %.3670, %.3669 + %.3671 =l add %.3498, 280 + %.3672 =w copy 430035244 + storew %.3672, %.3671 + %.3673 =l add %.3498, 284 + %.3674 =w copy 919506955 + storew %.3674, %.3673 + %.3675 =l add %.3498, 288 + %.3676 =w copy 0 + storew %.3676, %.3675 + %.3677 =l add %.3498, 292 + %.3678 =w copy 535778462 + storew %.3678, %.3677 + %.3679 =l add %.3498, 296 + %.3680 =l extsw 0 + %.3681 =l sub %.3680, 1 + %.3682 =w copy %.3681 + storew %.3682, %.3679 + %.3683 =l add %.3498, 300 + %.3684 =w copy 0 + storew %.3684, %.3683 + %.3685 =l add %.3498, 304 + %.3686 =l extsw 0 + %.3687 =l sub %.3686, 10 + %.3688 =w copy %.3687 + storew %.3688, %.3685 + %.3689 =l add %.3498, 308 + %.3690 =w copy 4237820494 + storew %.3690, %.3689 + %.3691 =l add %.3498, 312 + %.3692 =w copy 0 + storew %.3692, %.3691 + %.3693 =l add %.3498, 316 + %.3694 =w copy 4237820494 + storew %.3694, %.3693 + %.3695 =l add %.3498, 320 + %.3696 =l extsw 0 + %.3697 =l sub %.3696, 10 + 
%.3698 =w copy %.3697 + storew %.3698, %.3695 + %.3700 =l add %.3699, 0 + %.3701 =l extsw 4 + %.3702 =l mul %.3701, 1 + %.3703 =l add $g_132, %.3702 + storel %.3703, %.3700 + %.3704 =l add %.3699, 8 + %.3705 =l extsw 0 + %.3706 =l copy %.3705 + storel %.3706, %.3704 + %.3707 =l add %.3699, 16 + %.3708 =l extsw 2 + %.3709 =l mul %.3708, 1 + %.3710 =l add $g_132, %.3709 + storel %.3710, %.3707 + %.3711 =l add %.3699, 24 + %.3712 =l extsw 0 + %.3713 =l copy %.3712 + storel %.3713, %.3711 + %.3714 =l add %.3699, 32 + %.3715 =l extsw 4 + %.3716 =l mul %.3715, 1 + %.3717 =l add $g_132, %.3716 + storel %.3717, %.3714 + %.3718 =l add %.3699, 40 + %.3719 =l extsw 0 + %.3720 =l copy %.3719 + storel %.3720, %.3718 + %.3721 =l add %.3699, 48 + %.3722 =l extsw 2 + %.3723 =l mul %.3722, 1 + %.3724 =l add $g_132, %.3723 + storel %.3724, %.3721 + %.3725 =l add %.3699, 56 + %.3726 =l extsw 0 + %.3727 =l copy %.3726 + storel %.3727, %.3725 + %.3728 =l add %.3699, 64 + %.3729 =l extsw 4 + %.3730 =l mul %.3729, 1 + %.3731 =l add $g_132, %.3730 + storel %.3731, %.3728 + %.3732 =l add %.3699, 72 + %.3733 =l extsw 0 + %.3734 =l copy %.3733 + storel %.3734, %.3732 + %.3735 =l add %.3699, 80 + %.3736 =l extsw 2 + %.3737 =l mul %.3736, 1 + %.3738 =l add $g_132, %.3737 + storel %.3738, %.3735 + %.3739 =l add %.3699, 88 + %.3740 =l extsw 0 + %.3741 =l copy %.3740 + storel %.3741, %.3739 + %.3742 =l add %.3699, 96 + %.3743 =l extsw 4 + %.3744 =l mul %.3743, 1 + %.3745 =l add $g_132, %.3744 + storel %.3745, %.3742 + %.3746 =l add %.3699, 104 + %.3747 =l extsw 0 + %.3748 =l copy %.3747 + storel %.3748, %.3746 + %.3749 =l add %.3699, 112 + %.3750 =l extsw 2 + %.3751 =l mul %.3750, 1 + %.3752 =l add $g_132, %.3751 + storel %.3752, %.3749 + %.3753 =l add %.3699, 120 + %.3754 =l extsw 0 + %.3755 =l copy %.3754 + storel %.3755, %.3753 + %.3756 =l add %.3699, 128 + %.3757 =l extsw 4 + %.3758 =l mul %.3757, 1 + %.3759 =l add $g_132, %.3758 + storel %.3759, %.3756 + %.3760 =l add %.3699, 136 + %.3761 =l 
extsw 0 + %.3762 =l copy %.3761 + storel %.3762, %.3760 + %.3763 =l add %.3699, 144 + %.3764 =l extsw 2 + %.3765 =l mul %.3764, 1 + %.3766 =l add $g_132, %.3765 + storel %.3766, %.3763 + %.3767 =l add %.3699, 152 + %.3768 =l extsw 0 + %.3769 =l copy %.3768 + storel %.3769, %.3767 + %.3770 =l add %.3699, 160 + %.3771 =l extsw 4 + %.3772 =l mul %.3771, 1 + %.3773 =l add $g_132, %.3772 + storel %.3773, %.3770 + %.3774 =l add %.3699, 168 + %.3775 =l extsw 0 + %.3776 =l copy %.3775 + storel %.3776, %.3774 + %.3777 =l add %.3699, 176 + %.3778 =l extsw 2 + %.3779 =l mul %.3778, 1 + %.3780 =l add $g_132, %.3779 + storel %.3780, %.3777 + %.3781 =l add %.3699, 184 + %.3782 =l extsw 0 + %.3783 =l copy %.3782 + storel %.3783, %.3781 + %.3784 =l add %.3699, 192 + %.3785 =l extsw 4 + %.3786 =l mul %.3785, 1 + %.3787 =l add $g_132, %.3786 + storel %.3787, %.3784 + %.3788 =l add %.3699, 200 + %.3789 =l extsw 0 + %.3790 =l copy %.3789 + storel %.3790, %.3788 + %.3791 =l add %.3699, 208 + %.3792 =l extsw 2 + %.3793 =l mul %.3792, 1 + %.3794 =l add $g_132, %.3793 + storel %.3794, %.3791 + %.3795 =l add %.3699, 216 + %.3796 =l extsw 0 + %.3797 =l copy %.3796 + storel %.3797, %.3795 + %.3798 =l add %.3699, 224 + %.3799 =l extsw 4 + %.3800 =l mul %.3799, 1 + %.3801 =l add $g_132, %.3800 + storel %.3801, %.3798 + %.3802 =l add %.3699, 232 + %.3803 =l extsw 0 + %.3804 =l copy %.3803 + storel %.3804, %.3802 + %.3805 =l add %.3699, 240 + %.3806 =l extsw 2 + %.3807 =l mul %.3806, 1 + %.3808 =l add $g_132, %.3807 + storel %.3808, %.3805 + %.3809 =l add %.3699, 248 + %.3810 =l extsw 0 + %.3811 =l copy %.3810 + storel %.3811, %.3809 + %.3812 =l add %.3699, 256 + %.3813 =l extsw 4 + %.3814 =l mul %.3813, 1 + %.3815 =l add $g_132, %.3814 + storel %.3815, %.3812 + %.3816 =l add %.3699, 264 + %.3817 =l extsw 0 + %.3818 =l copy %.3817 + storel %.3818, %.3816 + %.3819 =l add %.3699, 272 + %.3820 =l extsw 2 + %.3821 =l mul %.3820, 1 + %.3822 =l add $g_132, %.3821 + storel %.3822, %.3819 + %.3823 =l 
add %.3699, 280 + %.3824 =l extsw 0 + %.3825 =l copy %.3824 + storel %.3825, %.3823 + %.3826 =l add %.3699, 288 + %.3827 =l extsw 4 + %.3828 =l mul %.3827, 1 + %.3829 =l add $g_132, %.3828 + storel %.3829, %.3826 + %.3830 =l add %.3699, 296 + %.3831 =l extsw 0 + %.3832 =l copy %.3831 + storel %.3832, %.3830 + %.3833 =l add %.3699, 304 + %.3834 =l extsw 2 + %.3835 =l mul %.3834, 1 + %.3836 =l add $g_132, %.3835 + storel %.3836, %.3833 + %.3837 =l add %.3699, 312 + %.3838 =l extsw 0 + %.3839 =l copy %.3838 + storel %.3839, %.3837 + %.3841 =l add %.3840, 0 + %.3842 =l extsw 0 + %.3843 =l copy %.3842 + storel %.3843, %.3841 + %.3845 =l add %.3844, 0 + %.3846 =l extsw 0 + %.3847 =l copy %.3846 + storel %.3847, %.3845 + %.3849 =l add %.3848, 0 + storel $g_46, %.3849 + %.3851 =l add %.3850, 0 + storel $g_57, %.3851 + %.3852 =l add %.3850, 8 + storel $g_57, %.3852 + %.3853 =l add %.3850, 16 + storel $g_57, %.3853 + %.3854 =l add %.3850, 24 + storel $g_57, %.3854 + %.3855 =l add %.3850, 32 + storel $g_57, %.3855 + %.3856 =l add %.3850, 40 + storel $g_57, %.3856 + %.3857 =l add %.3850, 48 + storel $g_57, %.3857 + %.3858 =l add %.3850, 56 + storel $g_57, %.3858 + %.3860 =l add %.3859, 0 + storel $g_84, %.3860 + %.3862 =l add %.3861, 0 + %.3863 =l extsw 0 + %.3864 =l copy %.3863 + storel %.3864, %.3862 + %.3866 =l add %.3865, 0 + %.3867 =l extsw 0 + %.3868 =l copy %.3867 + storel %.3868, %.3866 + %.3870 =l add %.3869, 0 + %.3871 =w copy 1589124801 + storew %.3871, %.3870 + %.3873 =l add %.3872, 0 + %.3874 =l copy $g_185 + %.3875 =l mul 24, 1 + %.3876 =l add %.3874, %.3875 + %.3877 =l copy %.3876 + storel %.3877, %.3873 + %.3879 =l add %.3878, 0 + storel $g_265, %.3879 + %.3881 =l add %.3880, 0 + %.3882 =w copy 1 + storeb %.3882, %.3881 + %.3884 =l add %.3883, 0 + storel $g_296, %.3884 + %.3886 =l add %.3885, 0 + %.3887 =w copy 2320921989 + storew %.3887, %.3886 + %.3891 =l loadl %.2 + storel %.3891, %.2 + %.3892 =l loadl %.3496 + storel $g_46, %.3892 + %.3893 =l extsw 6 + 
%.3894 =l mul %.3893, 36 + %.3895 =l add %.3498, %.3894 + %.3896 =l extsw 6 + %.3897 =l mul %.3896, 4 + %.3898 =l add %.3895, %.3897 + %.3899 =l extsw 0 + %.3900 =l mul %.3899, 4 + %.3901 =l add %.3898, %.3900 + %.3902 =w loadsw %.3901 + %.3903 =l extsw 0 + %.3904 =l extsw 4 + %.3905 =l mul %.3904, 64 + %.3906 =l add %.3699, %.3905 + %.3907 =l extsw 3 + %.3908 =l mul %.3907, 16 + %.3909 =l add %.3906, %.3908 + %.3910 =l extsw 0 + %.3911 =l mul %.3910, 8 + %.3912 =l add %.3909, %.3911 + %.3913 =l loadl %.3912 + %.3914 =w cnel %.3903, %.3913 + %.3915 =w copy %.3914 + %.3916 =l loadl %.3848 + storeb %.3915, %.3916 + %.3917 =w loadub $g_57 + %.3918 =w add %.3917, 1 + storeb %.3918, $g_57 + %.3919 =w call $safe_add_func_uint8_t_u_u(w %.3915, w %.3918) + %.3920 =w extub %.3919 + %.3921 =w ceqw %.3920, 0 + %.3922 =w cnew %.3902, %.3921 + %.3923 =w copy %.3922 + %.3924 =l loadl %.3859 + storew %.3923, %.3924 + %.3925 =w loadsb %.2872 + %.3926 =w extsb %.3925 + %.3927 =w call $safe_div_func_uint32_t_u_u(w %.3923, w %.3926) + %.3928 =w copy %.3927 + %.3929 =l extsw 3 + %.3930 =l mul %.3929, 36 + %.3931 =l add %.3498, %.3930 + %.3932 =l extsw 4 + %.3933 =l mul %.3932, 4 + %.3934 =l add %.3931, %.3933 + %.3935 =l extsw 0 + %.3936 =l mul %.3935, 4 + %.3937 =l add %.3934, %.3936 + %.3938 =w loadsw %.3937 + %.3939 =w loadsw %.2122 + %.3940 =w copy %.3939 + %.3941 =w copy 2 + %.3942 =w call $safe_rshift_func_uint8_t_u_u(w %.3940, w %.3941) + %.3943 =w extub %.3942 + %.3944 =l loadl $g_88 + %.3945 =l loadl %.3944 + %.3946 =l loadl %.2128 + %.3947 =l extsw 0 + %.3948 =l mul %.3947, 8 + %.3949 =l add $g_172, %.3948 + storel %.3946, %.3949 + %.3950 =w ceql %.3945, %.3946 + %.3951 =l extsw %.3950 + %.3952 =l loadl $g_58 + %.3953 =w cnel %.3951, %.3952 + %.3954 =w copy %.3953 + %.3955 =w loaduw %.4 + %.3956 =w copy %.3955 + %.3957 =w call $safe_div_func_int16_t_s_s(w %.3954, w %.3956) + %.3958 =w extsh %.3957 + %.3959 =w csgtw %.3943, %.3958 + %.3960 =w csgew %.3938, %.3959 + %.3961 =w 
copy %.3960 + %.3962 =w copy 7 + %.3963 =w call $safe_lshift_func_int16_t_s_u(w %.3961, w %.3962) + %.3964 =l extsh %.3963 + %.3965 =l extsw 0 + %.3966 =l sub %.3965, 1 + %.3967 =l and %.3964, %.3966 + %.3968 =w loadsb %.2872 + %.3969 =l extsb %.3968 + %.3970 =w cnel %.3967, %.3969 + %.3971 =l extsw %.3970 + %.3972 =w loadsb $g_2 + %.3973 =l extsb %.3972 + %.3974 =l call $safe_add_func_uint64_t_u_u(l %.3971, l %.3973) + %.3975 =l copy $g_130 + %.3976 =l mul 8, 1 + %.3977 =l add %.3975, %.3976 + %.3978 =l copy %.3977 + %.3979 =w loadsh %.3978 + %.3980 =l extsh %.3979 + %.3981 =l and %.3974, %.3980 + %.3982 =l extsw 1 + %.3983 =l mul %.3982, 1 + %.3984 =l add $g_132, %.3983 + %.3985 =w loadsb %.3984 + %.3986 =l extsb %.3985 + %.3987 =l or %.3981, %.3986 + %.3988 =w copy %.3987 + %.3989 =w call $safe_rshift_func_uint16_t_u_s(w %.3928, w %.3988) + %.3990 =l loadl %.3865 + %.3991 =l loadl $g_88 + %.3992 =l loadl %.3991 + %.3993 =l loadl %.3992 + %.3994 =w ceql %.3990, %.3993 + %.3995 =w cnew %.3994, 0 + jnz %.3995, @logic_right.1260, @logic_join.1261 +@logic_right.1260 + %.3996 =w loadub %.6 + %.3997 =w extub %.3996 + %.3998 =w cnew %.3997, 0 +@logic_join.1261 + %.3999 =w phi @if_true.1258 %.3995, @logic_right.1260 %.3998 + %.4000 =l copy $g_130 + %.4001 =l mul 0, 1 + %.4002 =l add %.4000, %.4001 + %.4003 =l copy %.4002 + %.4004 =w loadsw %.4003 + %.4005 =w csgew %.3999, %.4004 + %.4006 =w loadub %.6 + %.4007 =w extub %.4006 + %.4008 =w ceqw %.4005, %.4007 + %.4009 =l extsw %.4008 + %.4010 =w loadsb $g_2 + %.4011 =l extsb %.4010 + %.4012 =l call $safe_mod_func_uint64_t_u_u(l %.4009, l %.4011) + %.4013 =w loaduw %.4 + %.4014 =l extuw %.4013 + %.4015 =w cugtl %.4012, %.4014 + %.4016 =l extsw %.4015 + %.4017 =l or %.4016, 0 + %.4018 =l copy 4143169914 + %.4019 =l or %.4017, %.4018 + %.4020 =w copy %.4019 + %.4021 =w call $safe_unary_minus_func_uint16_t_u(w %.4020) + %.4022 =w copy 10535 + %.4023 =w copy 1 + %.4024 =w call $safe_lshift_func_int16_t_s_u(w %.4022, w %.4023) + 
%.4025 =w ceql $g_46, $g_46 + %.4026 =w loadub %.6 + %.4027 =l loadl %.2128 + %.4028 =l loadl %.4027 + %.4029 =w loadsw %.4028 + %.4030 =w cnew %.4029, 0 + jnz %.4030, @if_true.1262, @if_false.1263 +@if_true.1262 + %.4032 =l add %.4031, 0 + storel $g_185, %.4032 + %.4033 =l add %.4031, 8 + storel $g_185, %.4033 + %.4034 =l add %.4031, 16 + storel $g_185, %.4034 + %.4035 =l add %.4031, 24 + storel $g_185, %.4035 + %.4036 =l add %.4031, 32 + storel $g_185, %.4036 + %.4037 =l add %.4031, 40 + storel $g_185, %.4037 + %.4039 =l add %.4038, 0 + %.4040 =l copy 1 + storel %.4040, %.4039 + %.4042 =l add %.4041, 0 + storel %.2875, %.4042 + %.4044 =l add %.4043, 0 + %.4045 =l extsw 3 + %.4046 =l mul %.4045, 360 + %.4047 =l add %.250, %.4046 + %.4048 =l extsw 1 + %.4049 =l mul %.4048, 120 + %.4050 =l add %.4047, %.4049 + %.4051 =l extsw 4 + %.4052 =l mul %.4051, 20 + %.4053 =l add %.4050, %.4052 + storel %.4053, %.4044 + %.4055 =l add %.4054, 0 + %.4056 =l extsw 0 + %.4057 =l copy %.4056 + storel %.4057, %.4055 + %.4058 =l add %.4054, 8 + storel %.4043, %.4058 + %.4059 =l add %.4054, 16 + %.4060 =l extsw 0 + %.4061 =l copy %.4060 + storel %.4061, %.4059 + %.4062 =l add %.4054, 24 + %.4063 =l extsw 0 + %.4064 =l copy %.4063 + storel %.4064, %.4062 + %.4065 =l add %.4054, 32 + storel %.4043, %.4065 + %.4066 =l add %.4054, 40 + %.4067 =l extsw 0 + %.4068 =l copy %.4067 + storel %.4068, %.4066 + %.4071 =l extsw 0 + %.4072 =w cnel %.4071, %.248 + %.4073 =l loadl %.2 + %.4074 =w loadsw %.4073 + %.4075 =l loadl $g_88 + %.4076 =l loadl %.4075 + %.4077 =l loadl %.4076 + %.4078 =w loadsw %.4077 + %.4079 =l loadl %.2 + %.4080 =w loadsw %.4079 + %.4081 =w call $safe_sub_func_int32_t_s_s(w %.4078, w %.4080) + %.4082 =w csgtw %.4074, %.4081 + %.4083 =w copy %.4082 + %.4084 =w loaduw %.4 + %.4085 =w culew %.4083, %.4084 + %.4086 =l xor 155, 18446744073709551615 + %.4087 =w cnel %.4086, 0 + jnz %.4087, @logic_right.1264, @logic_join.1265 +@logic_right.1264 + %.4088 =l loadl %.3496 + %.4089 =l 
loadl %.4088 + %.4090 =w loadub %.4089 + %.4091 =w sub %.4090, 1 + storeb %.4091, %.4089 + %.4092 =l extsw 4 + %.4093 =l mul %.4092, 1 + %.4094 =l add $g_132, %.4093 + %.4095 =w loadsb %.4094 + %.4096 =w copy %.4095 + %.4097 =w call $safe_sub_func_uint8_t_u_u(w %.4091, w %.4096) + %.4098 =w extub %.4097 + %.4099 =w cnew %.4098, 0 +@logic_join.1265 + %.4100 =w phi @if_true.1262 %.4087, @logic_right.1264 %.4099 + %.4101 =l extsw 4 + %.4102 =l mul %.4101, 1 + %.4103 =l add $g_132, %.4102 + %.4104 =w loadsb %.4103 + %.4105 =w copy %.4104 + %.4106 =w loadub %.6 + %.4107 =w extub %.4106 + %.4108 =w call $safe_lshift_func_uint8_t_u_u(w %.4105, w %.4107) + %.4109 =w extub %.4108 + %.4110 =w csgew %.4100, %.4109 + %.4111 =w ceqw %.4085, %.4110 + %.4112 =w cnew %.4111, 0 + jnz %.4112, @if_true.1266, @if_false.1267 +@if_true.1266 + %.4114 =l add %.4113, 0 + storel $g_185, %.4114 + %.4116 =l add %.4115, 0 + storel %.4113, %.4116 + %.4118 =l add %.4117, 0 + %.4119 =w copy 2 + storew %.4119, %.4118 + %.4121 =l add %.4120, 0 + %.4122 =l copy $g_185 + %.4123 =l mul 8, 1 + %.4124 =l add %.4122, %.4123 + %.4125 =l copy %.4124 + storel %.4125, %.4121 + %.4127 =l add %.4126, 0 + %.4128 =l copy 1 + storel %.4128, %.4127 + %.4129 =l loadl %.4113 + %.4130 =l loadl %.4115 + storel %.4129, %.4130 + storel %.4129, %.2130 + %.4131 =l loadl $g_173 + %.4132 =w loadsw %.4131 + %.4133 =l extsw %.4132 + %.4134 =w loadsw %.4117 + %.4135 =l extsw %.4134 + %.4136 =l copy 1533123651342385939 + %.4137 =l copy $g_185 + %.4138 =l mul 24, 1 + %.4139 =l add %.4137, %.4138 + %.4140 =l copy %.4139 + storel %.4136, %.4140 + %.4141 =l copy %.4136 + %.4142 =l call $safe_sub_func_int64_t_s_s(l 8436840764840713857, l %.4141) + %.4143 =l or %.4135, %.4142 + %.4144 =l or %.4133, %.4143 + %.4145 =w copy %.4144 + storew %.4145, %.4131 + %.4146 =l loadl $g_173 + %.4147 =w loadsw %.4146 + %.4148 =l extsw 0 + %.4149 =l mul %.4148, 48 + %.4150 =l add %.4031, %.4149 + %.4151 =l extsw 4 + %.4152 =l mul %.4151, 8 + %.4153 
=l add %.4150, %.4152 + %.4154 =l loadl %.4153 + storel %.4154, $g_201 + %.4155 =l loadl %.4115 + %.4156 =l loadl %.4155 + %.4157 =w cnel %.4154, %.4156 + %.4158 =w copy %.4157 + %.4159 =w copy 6 + %.4160 =w call $safe_lshift_func_int16_t_s_u(w %.4158, w %.4159) + %.4161 =w copy 7 + %.4162 =w call $safe_rshift_func_int16_t_s_u(w %.4160, w %.4161) + %.4163 =w extsh %.4162 + %.4164 =l loadl %.4038 + %.4165 =l copy $g_185 + %.4166 =l mul 40, 1 + %.4167 =l add %.4165, %.4166 + %.4168 =l copy %.4167 + %.4169 =w loadsw %.4168 + %.4170 =w copy %.4169 + %.4171 =w copy 31567 + %.4172 =w call $safe_rshift_func_uint16_t_u_s(w %.4171, w 12) + %.4173 =w copy 622138554 + %.4174 =w copy 1 + %.4175 =w call $safe_sub_func_int32_t_s_s(w %.4173, w %.4174) + %.4176 =l extsw %.4175 + %.4177 =l xor 3541410248, %.4176 + %.4178 =l copy $g_130 + %.4179 =l mul 4, 1 + %.4180 =l add %.4178, %.4179 + %.4181 =l copy %.4180 + %.4182 =w loaduw %.4181 + %.4183 =w copy %.4182 + %.4184 =w loadub %.6 + %.4185 =w extub %.4184 + %.4186 =w call $safe_lshift_func_int8_t_s_s(w %.4183, w %.4185) + %.4187 =l extsb %.4186 + %.4188 =w loaduw %.4 + %.4189 =l extuw %.4188 + %.4190 =l call $safe_div_func_uint64_t_u_u(l %.4187, l %.4189) + %.4191 =w cnel %.4190, 0 + jnz %.4191, @logic_right.1272, @logic_join.1273 +@logic_right.1272 + %.4192 =w cnel 0, 0 +@logic_join.1273 + %.4193 =w phi @if_true.1266 %.4191, @logic_right.1272 %.4192 + %.4194 =l copy $g_130 + %.4195 =l mul 12, 1 + %.4196 =l add %.4194, %.4195 + %.4197 =l copy %.4196 + %.4198 =w loadsw %.4197 + %.4199 =w cslew %.4193, %.4198 + %.4200 =l extsw %.4199 + %.4201 =l or %.4200, 7 + %.4202 =w copy %.4201 + %.4203 =w call $safe_sub_func_int8_t_s_s(w %.4170, w %.4202) + %.4204 =w extsb %.4203 + %.4205 =w loaduw %.4 + %.4206 =w copy %.4205 + %.4207 =w call $safe_mul_func_uint16_t_u_u(w %.4204, w %.4206) + %.4208 =w extuh %.4207 + %.4209 =w loaduw %.4 + %.4210 =w ceqw %.4208, %.4209 + %.4211 =l extsw %.4210 + %.4212 =l loadl %.4120 + storel %.4211, %.4212 + 
%.4213 =l loadl $g_58 + %.4214 =l copy %.4213 + %.4215 =l call $safe_sub_func_int64_t_s_s(l %.4211, l %.4214) + %.4216 =w cnel %.4215, 0 + jnz %.4216, @logic_join.1271, @logic_right.1270 +@logic_right.1270 + %.4217 =l loadl %.2128 + %.4218 =l loadl %.4217 + %.4219 =w loadsw %.4218 + %.4220 =w cnew %.4219, 0 +@logic_join.1271 + %.4221 =w phi @logic_join.1273 %.4216, @logic_right.1270 %.4220 + %.4222 =l extsw %.4221 + %.4223 =w csgtl %.4164, %.4222 + %.4224 =l extsw %.4223 + %.4225 =l and %.4224, 2 + %.4226 =w copy %.4225 + %.4227 =l loadl %.4038 + %.4228 =w copy %.4227 + %.4229 =w call $safe_mod_func_int16_t_s_s(w %.4226, w %.4228) + %.4230 =l extsh %.4229 + %.4231 =l and 3162042065273101369, %.4230 + %.4232 =l copy 1 + %.4233 =w ceql %.4231, %.4232 + %.4234 =l extsw %.4233 + %.4235 =w csltl %.4234, 24 + %.4236 =l extsw 0 + %.4237 =l extsw 0 + %.4238 =w cnel %.4236, %.4237 + %.4239 =w cnew %.4238, 0 + jnz %.4239, @logic_right.1268, @logic_join.1269 +@logic_right.1268 + %.4240 =w cnel 1, 0 +@logic_join.1269 + %.4241 =w phi @logic_join.1271 %.4239, @logic_right.1268 %.4240 + %.4242 =w copy %.4241 + %.4243 =l copy $g_185 + %.4244 =l mul 32, 1 + %.4245 =l add %.4243, %.4244 + %.4246 =l copy %.4245 + %.4247 =w loaduw %.4246 + %.4248 =w copy %.4247 + %.4249 =w call $safe_rshift_func_int16_t_s_s(w %.4242, w %.4248) + %.4250 =w extsh %.4249 + %.4251 =w loadsw $g_24 + %.4252 =w and %.4250, %.4251 + %.4253 =w loadub %.6 + %.4254 =w copy %.4253 + %.4255 =w loadub $g_57 + %.4256 =w copy %.4255 + %.4257 =w call $safe_mul_func_int8_t_s_s(w %.4254, w %.4256) + %.4258 =w loaduw %.4 + %.4259 =l extuw %.4258 + %.4260 =l loadl %.4126 + %.4261 =w cnel %.4259, %.4260 + %.4262 =w csgew %.4163, %.4261 + %.4263 =w copy %.4262 + %.4264 =l copy $g_185 + %.4265 =l mul 32, 1 + %.4266 =l add %.4264, %.4265 + %.4267 =l copy %.4266 + %.4268 =w loaduw %.4267 + %.4269 =w call $safe_lshift_func_uint16_t_u_u(w %.4263, w %.4268) + %.4270 =w extuh %.4269 + %.4271 =w xor %.4270, 18446744073709551615 + 
%.4272 =w and %.4147, %.4271 + storew %.4272, %.4146 + jmp @if_join.1274 +@if_false.1267 + %.4274 =l add %.4273, 0 + %.4275 =l extsw 0 + %.4276 =l mul %.4275, 48 + %.4277 =l add %.4031, %.4276 + %.4278 =l extsw 4 + %.4279 =l mul %.4278, 8 + %.4280 =l add %.4277, %.4279 + storel %.4280, %.4274 + %.4281 =l loadl %.2130 + %.4282 =l loadl %.4273 + storel %.4281, %.4282 + %.4283 =l loadl $g_88 + %.4284 =l loadl %.4283 + %.4285 =l loadl %.4284 + %.4286 =w loadsw %.4285 + %.4287 =w cnew %.4286, 0 + jnz %.4287, @if_true.1275, @if_false.1276 +@if_true.1275 + jmp @for_cont.1254 +@if_false.1276 + %.4288 =l loadl %.2 + %.4289 =w loadsw %.4288 + %.4290 =w cnew %.4289, 0 + jnz %.4290, @if_true.1277, @if_false.1278 +@if_true.1277 + jmp @for_cont.1254 +@if_false.1278 +@if_join.1274 + %.4291 =l loadl %.2875 + %.4292 =l loadl %.4041 + storel %.4291, %.4292 + storel %.4291, %.2132 + jmp @if_join.1279 +@if_false.1263 + %.4293 =w copy 15 + %.4294 =l copy $g_185 + %.4295 =l mul 0, 1 + %.4296 =l add %.4294, %.4295 + %.4297 =l copy %.4296 + storeb %.4293, %.4297 +@for_cond.1280 + %.4298 =l copy $g_185 + %.4299 =l mul 0, 1 + %.4300 =l add %.4298, %.4299 + %.4301 =l copy %.4300 + %.4302 =w loadub %.4301 + %.4303 =w extub %.4302 + %.4304 =w csltw %.4303, 22 + jnz %.4304, @for_body.1281, @for_join.1283 +@for_body.1281 + %.4306 =l add %.4305, 0 + %.4307 =l extsw 5 + %.4308 =l mul %.4307, 320 + %.4309 =l add %.7, %.4308 + %.4310 =l extsw 1 + %.4311 =l mul %.4310, 64 + %.4312 =l add %.4309, %.4311 + %.4313 =l extsw 2 + %.4314 =l mul %.4313, 8 + %.4315 =l add %.4312, %.4314 + storel %.4315, %.4306 + %.4316 =l loadl %.2 + %.4317 =l loadl %.4305 + storel %.4316, %.4317 + %.4318 =l copy $g_130 + %.4319 =l mul 12, 1 + %.4320 =l add %.4318, %.4319 + %.4321 =l copy %.4320 + %.4322 =w loadsw %.4321 + %.4323 =w cnew %.4322, 0 + jnz %.4323, @if_true.1284, @if_false.1285 +@if_true.1284 + jmp @lbl_234.1237 +@if_false.1285 +@for_cont.1282 + %.4324 =l copy $g_185 + %.4325 =l mul 0, 1 + %.4326 =l add %.4324, 
%.4325 + %.4327 =l copy %.4326 + %.4328 =w loadub %.4327 + %.4329 =l extub %.4328 + %.4330 =l extsw 2 + %.4331 =l call $safe_add_func_int64_t_s_s(l %.4329, l %.4330) + %.4332 =w copy %.4331 + %.4333 =l copy $g_185 + %.4334 =l mul 0, 1 + %.4335 =l add %.4333, %.4334 + %.4336 =l copy %.4335 + storeb %.4332, %.4336 + jmp @for_cond.1280 +@for_join.1283 +@if_join.1279 + %.4337 =l loadl $g_80 + %.4338 =w copy %.4337 + %.4339 =l loadl %.3859 + storew %.4338, %.4339 + %.4340 =w loadsb %.2872 + %.4341 =l extsb %.4340 + %.4342 =l copy $g_130 + %.4343 =l mul 8, 1 + %.4344 =l add %.4342, %.4343 + %.4345 =l copy %.4344 + %.4346 =w loadsh %.4345 + %.4347 =l extsh %.4346 + %.4348 =l call $safe_sub_func_uint64_t_u_u(l %.4341, l %.4347) + %.4349 =w copy %.4348 + %.4350 =l copy $g_185 + %.4351 =l mul 48, 1 + %.4352 =l add %.4350, %.4351 + %.4353 =l copy %.4352 + %.4354 =w loadsw %.4353 + %.4355 =w copy %.4354 + %.4356 =w call $safe_sub_func_uint8_t_u_u(w %.4349, w %.4355) + %.4357 =w extub %.4356 + %.4358 =l copy $g_185 + %.4359 =l mul 48, 1 + %.4360 =l add %.4358, %.4359 + %.4361 =l copy %.4360 + %.4362 =w loadsw %.4361 + %.4363 =l extsw %.4362 + %.4364 =l loadl %.2128 + %.4365 =l loadl %.4364 + %.4366 =w loadsw %.4365 + %.4367 =l loadl %.2 + %.4368 =w loadsw %.4367 + %.4369 =l loadl %.2 + %.4370 =w loadsw %.4369 + %.4371 =w ceqw %.4368, %.4370 + %.4372 =w loadsw %.3869 + %.4373 =w and %.4371, %.4372 + %.4374 =w loadub %.6 + %.4375 =w extub %.4374 + %.4376 =w or %.4373, %.4375 + %.4377 =w csgtw %.4366, %.4376 + %.4378 =w copy %.4377 + %.4379 =l loadl $g_82 + %.4380 =w copy %.4379 + %.4381 =w call $safe_mod_func_uint8_t_u_u(w %.4378, w %.4380) + %.4382 =l extub %.4381 + %.4383 =l loadl %.3872 + storel %.4382, %.4383 + %.4384 =l or %.4382, 9439950986158878797 + %.4385 =w copy %.4384 + %.4386 =l copy $g_130 + %.4387 =l mul 4, 1 + %.4388 =l add %.4386, %.4387 + %.4389 =l copy %.4388 + %.4390 =w loaduw %.4389 + %.4391 =w copy %.4390 + %.4392 =w call $safe_mod_func_uint16_t_u_u(w %.4385, 
w %.4391) + %.4393 =l extuh %.4392 + %.4394 =w cnel %.4393, 2112011544 + %.4395 =w ceqw %.4394, 0 + %.4396 =l extsw %.4395 + %.4397 =l call $safe_add_func_int64_t_s_s(l %.4363, l %.4396) + %.4398 =w copy %.4397 + %.4399 =w call $safe_div_func_int32_t_s_s(w %.4357, w %.4398) + %.4400 =w copy %.4399 + %.4401 =w ceqw %.4338, %.4400 + %.4402 =w cnel 154, 1 + %.4403 =w cnew %.4402, 0 + jnz %.4403, @if_true.1286, @if_false.1287 +@if_true.1286 + %.4405 =l add %.4404, 0 + storel %.3859, %.4405 + %.4407 =l add %.4406, 0 + %.4408 =l extsw 0 + %.4409 =l copy %.4408 + storel %.4409, %.4407 + %.4411 =l add %.4410, 0 + storel %.4406, %.4411 + %.4413 =l add %.4412, 0 + %.4414 =l extsw 2 + %.4415 =l mul %.4414, 360 + %.4416 =l add %.250, %.4415 + %.4417 =l extsw 1 + %.4418 =l mul %.4417, 120 + %.4419 =l add %.4416, %.4418 + %.4420 =l extsw 1 + %.4421 =l mul %.4420, 20 + %.4422 =l add %.4419, %.4421 + %.4423 =l copy %.4422 + %.4424 =l mul 12, 1 + %.4425 =l add %.4423, %.4424 + %.4426 =l copy %.4425 + storel %.4426, %.4413 + %.4428 =l add %.4427, 0 + %.4429 =l extsw 0 + %.4430 =l sub %.4429, 1 + %.4431 =w copy %.4430 + storew %.4431, %.4428 + %.4434 =l add %.4433, 0 + %.4435 =l copy 1 + storel %.4435, %.4434 + %.4437 =l add %.4436, 0 + %.4438 =l copy 5986165483539914317 + storel %.4438, %.4437 + storew 0, %.4439 +@for_cond.1288 + %.4440 =w loadsw %.4439 + %.4441 =w csltw %.4440, 2 + jnz %.4441, @for_body.1289, @for_join.1291 +@for_body.1289 + %.4442 =w loadsw %.4439 + %.4443 =l extsw %.4442 + %.4444 =l mul %.4443, 8 + %.4445 =l add %.4432, %.4444 + storel $g_2, %.4445 +@for_cont.1290 + %.4446 =w loadsw %.4439 + %.4447 =w add %.4446, 1 + storew %.4447, %.4439 + jmp @for_cond.1288 +@for_join.1291 + %.4448 =l loadl $g_173 + %.4449 =w loadsw %.4448 + %.4450 =l loadl %.4404 + storel %.4, %.4450 + %.4451 =l loadl %.2130 + %.4452 =l loadl $g_201 + %.4453 =l loadl %.4451 + storel %.4453, %.4452 + %.4454 =l add %.4451, 8 + %.4455 =l add %.4452, 8 + %.4456 =l loadl %.4454 + storel %.4456, 
%.4455 + %.4457 =l add %.4454, 8 + %.4458 =l add %.4455, 8 + %.4459 =l loadl %.4457 + storel %.4459, %.4458 + %.4460 =l add %.4457, 8 + %.4461 =l add %.4458, 8 + %.4462 =l loadl %.4460 + storel %.4462, %.4461 + %.4463 =l add %.4460, 8 + %.4464 =l add %.4461, 8 + %.4465 =l loadl %.4463 + storel %.4465, %.4464 + %.4466 =l add %.4463, 8 + %.4467 =l add %.4464, 8 + %.4468 =l loadl %.4466 + storel %.4468, %.4467 + %.4469 =l add %.4466, 8 + %.4470 =l add %.4467, 8 + %.4471 =l loadl %.4469 + storel %.4471, %.4470 + %.4472 =l add %.4469, 8 + %.4473 =l add %.4470, 8 + %.4474 =w cnel %.4, %.4 + %.4475 =w and %.4449, %.4474 + storew %.4475, %.4448 + %.4476 =l extsw %.4475 + %.4477 =w loadub %.6 + %.4478 =l extub %.4477 + %.4479 =w loadsb %.2872 + %.4480 =l extsb %.4479 + %.4481 =l call $safe_div_func_uint64_t_u_u(l %.4478, l %.4480) + %.4482 =w copy %.4481 + %.4483 =l copy $g_130 + %.4484 =l mul 8, 1 + %.4485 =l add %.4483, %.4484 + %.4486 =l copy %.4485 + %.4487 =w loadsh %.4486 + %.4488 =w copy %.4487 + %.4489 =w call $safe_sub_func_int8_t_s_s(w %.4482, w %.4488) + %.4490 =w extsb %.4489 + %.4491 =w call $safe_rshift_func_uint16_t_u_s(w %.4490, w 4) + %.4492 =w extuh %.4491 + %.4493 =w cnew %.4492, 0 + jnz %.4493, @logic_right.1292, @logic_join.1293 +@logic_right.1292 + %.4494 =w loadub %.6 + %.4495 =w extub %.4494 + %.4496 =w cnew %.4495, 0 +@logic_join.1293 + %.4497 =w phi @for_join.1291 %.4493, @logic_right.1292 %.4496 + %.4498 =l loadl %.4406 + %.4499 =l loadl %.4410 + storel %.4498, %.4499 + %.4500 =l loadl %.3878 + %.4501 =w ceql %.4498, %.4500 + %.4502 =w copy %.4501 + %.4503 =w loaduw %.4 + %.4504 =w copy %.4503 + %.4505 =w call $safe_mul_func_int16_t_s_s(w %.4502, w %.4504) + %.4506 =w extsh %.4505 + %.4507 =w and %.4497, %.4506 + %.4508 =l loadl %.4412 + storew %.4507, %.4508 + %.4509 =l loadl $g_82 + %.4510 =l extsw 0 + %.4511 =w ceql %.4509, %.4510 + %.4512 =l extsw %.4511 + %.4513 =w loadsw %.4427 + %.4514 =l extsw %.4513 + %.4515 =l call 
$safe_add_func_int64_t_s_s(l %.4512, l %.4514) + %.4516 =w csltl %.4476, %.4515 + %.4517 =w cnew %.4516, 0 + jnz %.4517, @if_true.1294, @if_false.1295 +@if_true.1294 + %.4518 =l loadl $g_173 + %.4519 =w loadsw %.4518 + %.4520 =w loadsb %.2872 + %.4521 =w extsb %.4520 + %.4522 =w copy 2 + %.4523 =w call $safe_lshift_func_int16_t_s_u(w %.4521, w %.4522) + %.4524 =w extsh %.4523 + %.4525 =w or %.4519, %.4524 + storew %.4525, %.4518 + %.4526 =w loaduw $g_115 + %.4527 =w cnew %.4526, 0 + jnz %.4527, @if_true.1296, @if_false.1297 +@if_true.1296 + jmp @lbl_234.1237 +@if_false.1297 + %.4528 =l loadl %.2 + storel %.4528, %.3865 + %.4529 =w loadsb %.2872 + %.4530 =w extsb %.4529 + %.4531 =w cnew %.4530, 0 + jnz %.4531, @if_true.1298, @if_false.1299 +@if_true.1298 + jmp @for_join.1255 +@if_false.1299 + jmp @if_join.1300 +@if_false.1295 + %.4533 =l add %.4532, 0 + %.4534 =w copy 4 + storew %.4534, %.4533 + %.4536 =l add %.4535, 0 + storel %.4410, %.4536 + %.4537 =l add %.4535, 8 + %.4538 =l extsw 0 + %.4539 =l copy %.4538 + storel %.4539, %.4537 + %.4540 =l add %.4535, 16 + storel %.4410, %.4540 + %.4541 =l add %.4535, 24 + %.4542 =l extsw 0 + %.4543 =l copy %.4542 + storel %.4543, %.4541 + %.4544 =l add %.4535, 32 + storel %.4410, %.4544 + %.4545 =l add %.4535, 40 + %.4546 =l extsw 0 + %.4547 =l copy %.4546 + storel %.4547, %.4545 + %.4549 =l add %.4548, 0 + %.4550 =l extsw 0 + %.4551 =l sub %.4550, 8 + %.4552 =w copy %.4551 + storew %.4552, %.4549 + %.4553 =l add %.4548, 4 + %.4554 =w copy 3696835799 + storew %.4554, %.4553 + %.4555 =l add %.4548, 8 + %.4556 =w copy 2764261059 + storew %.4556, %.4555 + %.4557 =l add %.4548, 12 + %.4558 =w copy 1 + storew %.4558, %.4557 + %.4559 =l add %.4548, 16 + %.4560 =w copy 0 + storew %.4560, %.4559 + %.4561 =l add %.4548, 20 + %.4562 =w copy 2545267655 + storew %.4562, %.4561 + %.4563 =l add %.4548, 24 + %.4564 =l extsw 0 + %.4565 =l sub %.4564, 1 + %.4566 =w copy %.4565 + storew %.4566, %.4563 + %.4567 =l add %.4548, 28 + %.4568 =w 
copy 5 + storew %.4568, %.4567 + %.4569 =l add %.4548, 32 + %.4570 =l extsw 0 + %.4571 =l sub %.4570, 1 + %.4572 =w copy %.4571 + storew %.4572, %.4569 + %.4573 =l add %.4548, 36 + %.4574 =l extsw 0 + %.4575 =l sub %.4574, 1 + %.4576 =w copy %.4575 + storew %.4576, %.4573 + %.4577 =l add %.4548, 40 + %.4578 =w copy 2 + storew %.4578, %.4577 + %.4579 =l add %.4548, 44 + %.4580 =w copy 3473621425 + storew %.4580, %.4579 + %.4581 =l add %.4548, 48 + %.4582 =w copy 1 + storew %.4582, %.4581 + %.4583 =l add %.4548, 52 + %.4584 =w copy 1958032190 + storew %.4584, %.4583 + %.4585 =l add %.4548, 56 + %.4586 =l extsw 0 + %.4587 =l sub %.4586, 1 + %.4588 =w copy %.4587 + storew %.4588, %.4585 + %.4589 =l add %.4548, 60 + %.4590 =w copy 3473621425 + storew %.4590, %.4589 + %.4591 =l add %.4548, 64 + %.4592 =w copy 1958032190 + storew %.4592, %.4591 + %.4593 =l add %.4548, 68 + %.4594 =w copy 2545267655 + storew %.4594, %.4593 + %.4595 =l add %.4548, 72 + %.4596 =w copy 0 + storew %.4596, %.4595 + %.4597 =l add %.4548, 76 + %.4598 =w copy 2 + storew %.4598, %.4597 + %.4599 =l add %.4548, 80 + %.4600 =w copy 728500888 + storew %.4600, %.4599 + %.4601 =l add %.4548, 84 + %.4602 =w copy 3473621425 + storew %.4602, %.4601 + %.4603 =l add %.4548, 88 + %.4604 =w copy 5 + storew %.4604, %.4603 + %.4605 =l add %.4548, 92 + %.4606 =w copy 728500888 + storew %.4606, %.4605 + %.4607 =l add %.4548, 96 + %.4608 =w copy 1 + storew %.4608, %.4607 + %.4609 =l add %.4548, 100 + %.4610 =w copy 0 + storew %.4610, %.4609 + %.4611 =l add %.4548, 104 + %.4612 =w copy 2545267655 + storew %.4612, %.4611 + %.4613 =l add %.4548, 108 + %.4614 =l extsw 0 + %.4615 =l sub %.4614, 1 + %.4616 =w copy %.4615 + storew %.4616, %.4613 + %.4617 =l add %.4548, 112 + %.4618 =w copy 5 + storew %.4618, %.4617 + %.4619 =l add %.4548, 116 + %.4620 =l extsw 0 + %.4621 =l sub %.4620, 1 + %.4622 =w copy %.4621 + storew %.4622, %.4619 + %.4623 =l add %.4548, 120 + %.4624 =l extsw 0 + %.4625 =l sub %.4624, 1 + %.4626 =w 
copy %.4625 + storew %.4626, %.4623 + %.4627 =l add %.4548, 124 + %.4628 =w copy 2 + storew %.4628, %.4627 + %.4629 =l add %.4548, 128 + %.4630 =w copy 3473621425 + storew %.4630, %.4629 + %.4631 =l add %.4548, 132 + %.4632 =w copy 1 + storew %.4632, %.4631 + %.4633 =l add %.4548, 136 + %.4634 =w copy 1958032190 + storew %.4634, %.4633 + %.4635 =l add %.4548, 140 + %.4636 =l extsw 0 + %.4637 =l sub %.4636, 1 + %.4638 =w copy %.4637 + storew %.4638, %.4635 + %.4639 =l add %.4548, 144 + %.4640 =w copy 3473621425 + storew %.4640, %.4639 + %.4641 =l add %.4548, 148 + %.4642 =w copy 1958032190 + storew %.4642, %.4641 + %.4643 =l add %.4548, 152 + %.4644 =w copy 2545267655 + storew %.4644, %.4643 + %.4645 =l add %.4548, 156 + %.4646 =w copy 0 + storew %.4646, %.4645 + %.4647 =l add %.4548, 160 + %.4648 =w copy 2 + storew %.4648, %.4647 + %.4649 =l add %.4548, 164 + %.4650 =w copy 728500888 + storew %.4650, %.4649 + %.4651 =l add %.4548, 168 + %.4652 =w copy 3473621425 + storew %.4652, %.4651 + %.4653 =l add %.4548, 172 + %.4654 =w copy 5 + storew %.4654, %.4653 + %.4655 =l add %.4548, 176 + %.4656 =w copy 728500888 + storew %.4656, %.4655 + %.4657 =l add %.4548, 180 + %.4658 =w copy 1 + storew %.4658, %.4657 + %.4659 =l add %.4548, 184 + %.4660 =w copy 0 + storew %.4660, %.4659 + %.4661 =l add %.4548, 188 + %.4662 =w copy 2545267655 + storew %.4662, %.4661 + %.4663 =l add %.4548, 192 + %.4664 =l extsw 0 + %.4665 =l sub %.4664, 1 + %.4666 =w copy %.4665 + storew %.4666, %.4663 + %.4667 =l add %.4548, 196 + %.4668 =w copy 5 + storew %.4668, %.4667 + %.4669 =l add %.4548, 200 + %.4670 =l extsw 0 + %.4671 =l sub %.4670, 1 + %.4672 =w copy %.4671 + storew %.4672, %.4669 + %.4673 =l add %.4548, 204 + %.4674 =l extsw 0 + %.4675 =l sub %.4674, 1 + %.4676 =w copy %.4675 + storew %.4676, %.4673 + %.4677 =l add %.4548, 208 + %.4678 =w copy 2 + storew %.4678, %.4677 + %.4679 =l add %.4548, 212 + %.4680 =w copy 3473621425 + storew %.4680, %.4679 + %.4681 =l add %.4548, 216 + %.4682 
=w copy 1 + storew %.4682, %.4681 + %.4683 =l add %.4548, 220 + %.4684 =w copy 1958032190 + storew %.4684, %.4683 + %.4685 =l add %.4548, 224 + %.4686 =l extsw 0 + %.4687 =l sub %.4686, 1 + %.4688 =w copy %.4687 + storew %.4688, %.4685 + %.4689 =l add %.4548, 228 + %.4690 =w copy 3473621425 + storew %.4690, %.4689 + %.4691 =l add %.4548, 232 + %.4692 =w copy 1958032190 + storew %.4692, %.4691 + %.4693 =l add %.4548, 236 + %.4694 =w copy 2545267655 + storew %.4694, %.4693 + %.4695 =l add %.4548, 240 + %.4696 =w copy 0 + storew %.4696, %.4695 + %.4697 =l add %.4548, 244 + %.4698 =w copy 2 + storew %.4698, %.4697 + %.4699 =l add %.4548, 248 + %.4700 =w copy 728500888 + storew %.4700, %.4699 + %.4701 =l add %.4548, 252 + %.4702 =w copy 3473621425 + storew %.4702, %.4701 + %.4703 =l add %.4548, 256 + %.4704 =w copy 5 + storew %.4704, %.4703 + %.4705 =l add %.4548, 260 + %.4706 =w copy 728500888 + storew %.4706, %.4705 + %.4707 =l add %.4548, 264 + %.4708 =w copy 1 + storew %.4708, %.4707 + %.4709 =l add %.4548, 268 + %.4710 =w copy 0 + storew %.4710, %.4709 + %.4711 =l add %.4548, 272 + %.4712 =w copy 2545267655 + storew %.4712, %.4711 + %.4713 =l add %.4548, 276 + %.4714 =l extsw 0 + %.4715 =l sub %.4714, 1 + %.4716 =w copy %.4715 + storew %.4716, %.4713 + %.4717 =l add %.4548, 280 + %.4718 =w copy 5 + storew %.4718, %.4717 + %.4719 =l add %.4548, 284 + %.4720 =l extsw 0 + %.4721 =l sub %.4720, 1 + %.4722 =w copy %.4721 + storew %.4722, %.4719 + %.4723 =l add %.4548, 288 + %.4724 =l extsw 0 + %.4725 =l sub %.4724, 1 + %.4726 =w copy %.4725 + storew %.4726, %.4723 + %.4727 =l add %.4548, 292 + %.4728 =w copy 2 + storew %.4728, %.4727 + %.4729 =l add %.4548, 296 + %.4730 =w copy 3473621425 + storew %.4730, %.4729 + %.4731 =l add %.4548, 300 + %.4732 =w copy 1 + storew %.4732, %.4731 + %.4733 =l add %.4548, 304 + %.4734 =w copy 1958032190 + storew %.4734, %.4733 + %.4735 =l add %.4548, 308 + %.4736 =l extsw 0 + %.4737 =l sub %.4736, 1 + %.4738 =w copy %.4737 + storew 
%.4738, %.4735 + %.4739 =l add %.4548, 312 + %.4740 =w copy 3473621425 + storew %.4740, %.4739 + %.4741 =l add %.4548, 316 + %.4742 =w copy 1958032190 + storew %.4742, %.4741 + %.4743 =l add %.4548, 320 + %.4744 =w copy 2545267655 + storew %.4744, %.4743 + %.4745 =l add %.4548, 324 + %.4746 =w copy 0 + storew %.4746, %.4745 + %.4747 =l add %.4548, 328 + %.4748 =w copy 2 + storew %.4748, %.4747 + %.4749 =l add %.4548, 332 + %.4750 =w copy 728500888 + storew %.4750, %.4749 + %.4751 =l add %.4548, 336 + %.4752 =w copy 3473621425 + storew %.4752, %.4751 + %.4753 =l add %.4548, 340 + %.4754 =w copy 5 + storew %.4754, %.4753 + %.4755 =l add %.4548, 344 + %.4756 =w copy 728500888 + storew %.4756, %.4755 + %.4757 =l add %.4548, 348 + %.4758 =w copy 1 + storew %.4758, %.4757 + %.4759 =l add %.4548, 352 + %.4760 =w copy 0 + storew %.4760, %.4759 + %.4761 =l add %.4548, 356 + %.4762 =w copy 2545267655 + storew %.4762, %.4761 + %.4764 =l add %.4763, 0 + %.4765 =l extsw 3 + %.4766 =l mul %.4765, 320 + %.4767 =l add %.7, %.4766 + %.4768 =l extsw 3 + %.4769 =l mul %.4768, 64 + %.4770 =l add %.4767, %.4769 + %.4771 =l extsw 0 + %.4772 =l mul %.4771, 8 + %.4773 =l add %.4770, %.4772 + storel %.4773, %.4764 + %.4777 =w loaduw %.4532 + %.4778 =w copy %.4777 + %.4779 =w copy 7 + %.4780 =w call $safe_rshift_func_int8_t_s_u(w %.4778, w %.4779) + %.4781 =w extsb %.4780 + storew %.4781, %.2886 + %.4782 =w loadsw %.2010 + %.4783 =w csgew %.4781, %.4782 + %.4784 =l extsw 1 + %.4785 =l mul %.4784, 8 + %.4786 =l add %.4432, %.4785 + %.4787 =l loadl %.4786 + %.4788 =w cnel $g_2, %.4787 + %.4789 =l loadl %.2128 + %.4790 =l loadl %.4789 + %.4791 =w loadsw %.4790 + %.4792 =w cslew %.4788, %.4791 + %.4793 =w cnew %.4792, 0 + jnz %.4793, @logic_right.1301, @logic_join.1302 +@logic_right.1301 + %.4794 =w cnel 1, 0 +@logic_join.1302 + %.4795 =w phi @if_false.1295 %.4793, @logic_right.1301 %.4794 + %.4796 =l extsw 6 + %.4797 =l mul %.4796, 8 + %.4798 =l add %.3850, %.4797 + %.4799 =l loadl %.4798 + 
%.4800 =w ceql %.4799, %.2872 + %.4801 =w loadsb %.2872 + %.4802 =l extsb %.4801 + %.4803 =w cslel %.4802, 38 + %.4804 =l loadl %.2 + %.4805 =w loadsw %.4804 + %.4806 =w cslew %.4803, %.4805 + %.4807 =w copy %.4806 + %.4808 =w loaduw %.4 + %.4809 =w ceqw %.4807, %.4808 + %.4810 =l copy $g_185 + %.4811 =l mul 44, 1 + %.4812 =l add %.4810, %.4811 + %.4813 =l copy %.4812 + %.4814 =w loadsw %.4813 + %.4815 =w and %.4809, %.4814 + %.4816 =l extsw %.4815 + %.4817 =l copy $g_185 + %.4818 =l mul 44, 1 + %.4819 =l add %.4817, %.4818 + %.4820 =l copy %.4819 + %.4821 =w loadsw %.4820 + %.4822 =l extsw %.4821 + %.4823 =l call $safe_sub_func_int64_t_s_s(l %.4816, l %.4822) + %.4824 =l loadl %.2 + %.4825 =w loadsw %.4824 + %.4826 =l extsw %.4825 + %.4827 =w cslel %.4823, %.4826 + %.4828 =w copy %.4827 + %.4829 =w loadub %.6 + %.4830 =w extub %.4829 + %.4831 =w call $safe_rshift_func_int16_t_s_u(w %.4828, w %.4830) + %.4832 =w extsh %.4831 + %.4833 =w cnew %.4800, %.4832 + %.4834 =l loadl $g_173 + storew %.4833, %.4834 + %.4835 =l extsw 2 + %.4836 =l mul %.4835, 60 + %.4837 =l add %.4548, %.4836 + %.4838 =l extsw 3 + %.4839 =l mul %.4838, 12 + %.4840 =l add %.4837, %.4839 + %.4841 =l extsw 1 + %.4842 =l mul %.4841, 4 + %.4843 =l add %.4840, %.4842 + %.4844 =w loadsw %.4843 + %.4845 =w loadsb $g_2 + %.4846 =w copy 4 + %.4847 =w loadub %.6 + %.4848 =w extub %.4847 + storew %.4848, %.2125 + storel %.2130, %.2891 + storel %.2130, $g_296 + storel %.2130, %.2893 + %.4849 =w cnel %.2130, $g_201 + %.4850 =w csgew %.4848, %.4849 + %.4851 =w copy %.4850 + %.4852 =l loadl %.2005 + %.4853 =w loadsb %.4852 + %.4854 =l extsb %.4853 + %.4855 =l extsw 0 + %.4856 =l sub %.4855, 1 + %.4857 =l or %.4854, %.4856 + %.4858 =w copy %.4857 + storeb %.4858, %.4852 + %.4859 =w loadsw $g_50 + %.4860 =l copy $g_130 + %.4861 =l mul 16, 1 + %.4862 =l add %.4860, %.4861 + %.4863 =l copy %.4862 + %.4864 =w loaduw %.4863 + %.4865 =l copy $g_130 + %.4866 =l mul 8, 1 + %.4867 =l add %.4865, %.4866 + %.4868 =l copy 
%.4867 + %.4869 =w loadsh %.4868 + %.4870 =l extsh %.4869 + %.4871 =l xor %.4870, 3249 + %.4872 =w copy %.4871 + storeh %.4872, %.4868 + %.4873 =w extsh %.4872 + %.4874 =w or %.4864, %.4873 + %.4875 =w loadsb %.2872 + %.4876 =w extsb %.4875 + %.4877 =w and %.4874, %.4876 + %.4878 =w copy %.4877 + %.4879 =l extsw 0 + %.4880 =l sub %.4879, 9 + %.4881 =w copy %.4880 + %.4882 =w call $safe_div_func_int8_t_s_s(w %.4878, w %.4881) + %.4883 =l extsb %.4882 + %.4884 =l loadl $g_58 + %.4885 =l copy %.4884 + %.4886 =l call $safe_add_func_int64_t_s_s(l %.4883, l %.4885) + %.4887 =w copy %.4886 + %.4888 =w copy 20699 + %.4889 =w call $safe_add_func_int16_t_s_s(w %.4887, w %.4888) + %.4890 =w copy %.4889 + %.4891 =w loaduw $g_115 + %.4892 =w copy %.4891 + %.4893 =w call $safe_add_func_uint8_t_u_u(w %.4890, w %.4892) + %.4894 =l extub %.4893 + %.4895 =w csltl %.4894, 96816888117085888 + %.4896 =l extsw %.4895 + %.4897 =l loadl $g_82 + %.4898 =w cslel %.4896, %.4897 + %.4899 =w copy %.4898 + %.4900 =w loaduw %.4 + %.4901 =w cugtw %.4899, %.4900 + %.4902 =w ceqw %.4859, %.4901 + %.4903 =w loadsb %.3880 + %.4904 =w extsb %.4903 + %.4905 =w ceqw %.4902, %.4904 + %.4906 =l extsw %.4905 + %.4907 =w csltl %.4906, 227 + %.4908 =w copy %.4907 + %.4909 =w call $safe_sub_func_int16_t_s_s(w %.4851, w %.4908) + %.4910 =w loadsb %.2872 + %.4911 =w extsb %.4910 + %.4912 =w call $safe_add_func_int16_t_s_s(w %.4909, w %.4911) + %.4913 =w extsh %.4912 + %.4914 =l loadl $g_88 + %.4915 =l loadl %.4914 + %.4916 =l loadl %.4915 + %.4917 =w loadsw %.4916 + %.4918 =w csgew %.4913, %.4917 + %.4919 =l extsw %.4918 + %.4920 =l copy $g_265 + %.4921 =l mul 48, 1 + %.4922 =l add %.4920, %.4921 + %.4923 =l copy %.4922 + %.4924 =w loadsw %.4923 + %.4925 =l extsw %.4924 + %.4926 =l call $safe_mod_func_uint64_t_u_u(l %.4919, l %.4925) + %.4927 =w copy %.4926 + %.4928 =w loaduw %.4532 + %.4929 =w copy %.4928 + %.4930 =w call $safe_sub_func_int8_t_s_s(w %.4927, w %.4929) + %.4931 =l extsb %.4930 + %.4932 =l and 
%.4931, 1 + %.4933 =w copy %.4932 + %.4934 =w call $safe_mod_func_int8_t_s_s(w %.4846, w %.4933) + %.4935 =l extsb %.4934 + %.4936 =l copy $g_185 + %.4937 =l mul 36, 1 + %.4938 =l add %.4936, %.4937 + %.4939 =l copy %.4938 + %.4940 =w loaduw %.4939 + %.4941 =l extuw %.4940 + %.4942 =l call $safe_div_func_uint64_t_u_u(l %.4935, l %.4941) + %.4943 =w loadsw %.2886 + %.4944 =l extsw %.4943 + %.4945 =w cnel %.4942, %.4944 + %.4946 =w or %.4844, %.4945 + storew %.4946, %.4843 + %.4947 =w loaduw %.4532 + %.4948 =w cnew %.4947, 0 + jnz %.4948, @if_true.1303, @if_false.1304 +@if_true.1303 + jmp @for_join.1255 +@if_false.1304 + %.4949 =l loadl %.2 + %.4950 =l loadl %.4763 + storel %.4949, %.4950 +@if_join.1300 + %.4951 =w loadsw %.4427 + %.4952 =w cnew %.4951, 0 + jnz %.4952, @if_true.1305, @if_false.1306 +@if_true.1305 + %.4954 =l add %.4953, 0 + storel $g_84, %.4954 + %.4956 =l add %.4955, 0 + %.4957 =w copy 0 + storew %.4957, %.4956 + %.4959 =l add %.4958, 0 + %.4960 =w copy 862420352 + storew %.4960, %.4959 + %.4961 =l add %.4958, 4 + %.4962 =w copy 2 + storew %.4962, %.4961 + %.4963 =l add %.4958, 8 + %.4964 =w copy 3109269297 + storew %.4964, %.4963 + %.4965 =l add %.4958, 12 + %.4966 =w copy 2451567122 + storew %.4966, %.4965 + %.4967 =l add %.4958, 16 + %.4968 =w copy 862420352 + storew %.4968, %.4967 + %.4969 =l add %.4958, 20 + %.4970 =w copy 2451567122 + storew %.4970, %.4969 + %.4971 =l add %.4958, 24 + %.4972 =w copy 3109269297 + storew %.4972, %.4971 + %.4973 =l add %.4958, 28 + %.4974 =w copy 2 + storew %.4974, %.4973 + %.4975 =l add %.4958, 32 + %.4976 =w copy 862420352 + storew %.4976, %.4975 + %.4977 =l add %.4958, 36 + %.4978 =w copy 2 + storew %.4978, %.4977 + %.4979 =l add %.4958, 40 + %.4980 =w copy 0 + storew %.4980, %.4979 + %.4981 =l add %.4958, 44 + %.4982 =w copy 2451567122 + storew %.4982, %.4981 + %.4983 =l add %.4958, 48 + %.4984 =w copy 2531821652 + storew %.4984, %.4983 + %.4985 =l add %.4958, 52 + %.4986 =w copy 1738135665 + storew %.4986, 
%.4985 + %.4987 =l add %.4958, 56 + %.4988 =w copy 2531821652 + storew %.4988, %.4987 + %.4989 =l add %.4958, 60 + %.4990 =w copy 2451567122 + storew %.4990, %.4989 + %.4991 =l add %.4958, 64 + %.4992 =w copy 0 + storew %.4992, %.4991 + %.4993 =l add %.4958, 68 + %.4994 =w copy 1738135665 + storew %.4994, %.4993 + %.4995 =l add %.4958, 72 + %.4996 =w copy 0 + storew %.4996, %.4995 + %.4997 =l add %.4958, 76 + %.4998 =w copy 2451567122 + storew %.4998, %.4997 + %.4999 =l add %.4958, 80 + %.5000 =w copy 862420352 + storew %.5000, %.4999 + %.5001 =l add %.4958, 84 + %.5002 =w copy 1738135665 + storew %.5002, %.5001 + %.5003 =l add %.4958, 88 + %.5004 =l extsw 0 + %.5005 =l sub %.5004, 1 + %.5006 =w copy %.5005 + storew %.5006, %.5003 + %.5007 =l add %.4958, 92 + %.5008 =w copy 1738135665 + storew %.5008, %.5007 + %.5009 =l add %.4958, 96 + %.5010 =w copy 862420352 + storew %.5010, %.5009 + %.5011 =l add %.4958, 100 + %.5012 =w copy 6 + storew %.5012, %.5011 + %.5013 =l add %.4958, 104 + %.5014 =l extsw 0 + %.5015 =l sub %.5014, 1 + %.5016 =w copy %.5015 + storew %.5016, %.5013 + %.5017 =l add %.4958, 108 + %.5018 =w copy 6 + storew %.5018, %.5017 + %.5019 =l add %.4958, 112 + %.5020 =w copy 862420352 + storew %.5020, %.5019 + %.5021 =l add %.4958, 116 + %.5022 =w copy 1738135665 + storew %.5022, %.5021 + %.5023 =l add %.4958, 120 + %.5024 =w copy 2531821652 + storew %.5024, %.5023 + %.5025 =l add %.4958, 124 + %.5026 =w copy 1738135665 + storew %.5026, %.5025 + %.5027 =l add %.4958, 128 + %.5028 =w copy 2531821652 + storew %.5028, %.5027 + %.5029 =l add %.4958, 132 + %.5030 =w copy 2451567122 + storew %.5030, %.5029 + %.5031 =l add %.4958, 136 + %.5032 =w copy 0 + storew %.5032, %.5031 + %.5033 =l add %.4958, 140 + %.5034 =w copy 1738135665 + storew %.5034, %.5033 + %.5035 =l add %.4958, 144 + %.5036 =w copy 0 + storew %.5036, %.5035 + %.5037 =l add %.4958, 148 + %.5038 =w copy 2451567122 + storew %.5038, %.5037 + %.5039 =l add %.4958, 152 + %.5040 =w copy 2531821652 
+ storew %.5040, %.5039 + %.5041 =l add %.4958, 156 + %.5042 =w copy 1738135665 + storew %.5042, %.5041 + %.5043 =l add %.4958, 160 + %.5044 =w copy 862420352 + storew %.5044, %.5043 + %.5045 =l add %.4958, 164 + %.5046 =w copy 2451567122 + storew %.5046, %.5045 + %.5047 =l add %.4958, 168 + %.5048 =w copy 3109269297 + storew %.5048, %.5047 + %.5049 =l add %.4958, 172 + %.5050 =w copy 2 + storew %.5050, %.5049 + %.5051 =l add %.4958, 176 + %.5052 =w copy 862420352 + storew %.5052, %.5051 + %.5053 =l add %.4958, 180 + %.5054 =w copy 2 + storew %.5054, %.5053 + %.5055 =l add %.4958, 184 + %.5056 =w copy 3109269297 + storew %.5056, %.5055 + %.5057 =l add %.4958, 188 + %.5058 =w copy 2451567122 + storew %.5058, %.5057 + %.5059 =l add %.4958, 192 + %.5060 =w copy 862420352 + storew %.5060, %.5059 + %.5061 =l add %.4958, 196 + %.5062 =w copy 2451567122 + storew %.5062, %.5061 + %.5063 =l add %.4958, 200 + %.5064 =w copy 0 + storew %.5064, %.5063 + %.5065 =l add %.4958, 204 + %.5066 =w copy 2 + storew %.5066, %.5065 + %.5067 =l add %.4958, 208 + %.5068 =w copy 2531821652 + storew %.5068, %.5067 + %.5069 =l add %.4958, 212 + %.5070 =w copy 6 + storew %.5070, %.5069 + %.5071 =l add %.4958, 216 + %.5072 =w copy 2531821652 + storew %.5072, %.5071 + %.5073 =l add %.4958, 220 + %.5074 =w copy 2 + storew %.5074, %.5073 + %.5075 =l add %.4958, 224 + %.5076 =w copy 0 + storew %.5076, %.5075 + %.5077 =l add %.4958, 228 + %.5078 =w copy 6 + storew %.5078, %.5077 + %.5079 =l add %.4958, 232 + %.5080 =w copy 0 + storew %.5080, %.5079 + %.5081 =l add %.4958, 236 + %.5082 =w copy 2 + storew %.5082, %.5081 + %.5083 =l add %.4958, 240 + %.5084 =w copy 862420352 + storew %.5084, %.5083 + %.5085 =l add %.4958, 244 + %.5086 =w copy 6 + storew %.5086, %.5085 + %.5087 =l add %.4958, 248 + %.5088 =l extsw 0 + %.5089 =l sub %.5088, 1 + %.5090 =w copy %.5089 + storew %.5090, %.5087 + %.5091 =l add %.4958, 252 + %.5092 =w copy 6 + storew %.5092, %.5091 + %.5093 =l add %.4958, 256 + %.5094 =w copy 
862420352 + storew %.5094, %.5093 + %.5095 =l add %.4958, 260 + %.5096 =w copy 1738135665 + storew %.5096, %.5095 + %.5097 =l add %.4958, 264 + %.5098 =l extsw 0 + %.5099 =l sub %.5098, 1 + %.5100 =w copy %.5099 + storew %.5100, %.5097 + %.5101 =l add %.4958, 268 + %.5102 =w copy 1738135665 + storew %.5102, %.5101 + %.5103 =l add %.4958, 272 + %.5104 =w copy 862420352 + storew %.5104, %.5103 + %.5105 =l add %.4958, 276 + %.5106 =w copy 6 + storew %.5106, %.5105 + %.5107 =l add %.4958, 280 + %.5108 =w copy 2531821652 + storew %.5108, %.5107 + %.5109 =l add %.4958, 284 + %.5110 =w copy 6 + storew %.5110, %.5109 + %.5111 =l add %.4958, 288 + %.5112 =w copy 2531821652 + storew %.5112, %.5111 + %.5113 =l add %.4958, 292 + %.5114 =w copy 2 + storew %.5114, %.5113 + %.5115 =l add %.4958, 296 + %.5116 =w copy 0 + storew %.5116, %.5115 + %.5117 =l add %.4958, 300 + %.5118 =w copy 6 + storew %.5118, %.5117 + %.5119 =l add %.4958, 304 + %.5120 =w copy 0 + storew %.5120, %.5119 + %.5121 =l add %.4958, 308 + %.5122 =w copy 2 + storew %.5122, %.5121 + %.5123 =l add %.4958, 312 + %.5124 =w copy 2531821652 + storew %.5124, %.5123 + %.5125 =l add %.4958, 316 + %.5126 =w copy 6 + storew %.5126, %.5125 + %.5127 =l add %.4958, 320 + %.5128 =w copy 862420352 + storew %.5128, %.5127 + %.5129 =l add %.4958, 324 + %.5130 =w copy 2 + storew %.5130, %.5129 + %.5131 =l add %.4958, 328 + %.5132 =w copy 3109269297 + storew %.5132, %.5131 + %.5133 =l add %.4958, 332 + %.5134 =w copy 2451567122 + storew %.5134, %.5133 + %.5135 =l add %.4958, 336 + %.5136 =w copy 862420352 + storew %.5136, %.5135 + %.5137 =l add %.4958, 340 + %.5138 =w copy 2451567122 + storew %.5138, %.5137 + %.5139 =l add %.4958, 344 + %.5140 =w copy 3109269297 + storew %.5140, %.5139 + %.5141 =l add %.4958, 348 + %.5142 =w copy 2 + storew %.5142, %.5141 + %.5143 =l add %.4958, 352 + %.5144 =w copy 862420352 + storew %.5144, %.5143 + %.5145 =l add %.4958, 356 + %.5146 =w copy 2 + storew %.5146, %.5145 + %.5148 =l add %.5147, 0 
+ storel $g_82, %.5148 + %.5150 =l add %.5149, 0 + %.5151 =l extsw 0 + %.5152 =l copy %.5151 + storel %.5152, %.5150 + %.5155 =l extsw 2 + %.5156 =l mul %.5155, 8 + %.5157 =l add %.2013, %.5156 + %.5158 =l loadl %.5157 + %.5159 =l extsw 0 + %.5160 =w ceql %.5158, %.5159 + %.5161 =l loadl $g_173 + storew %.5160, %.5161 + %.5162 =w loadsw %.4955 + %.5163 =l loadl %.2143 + %.5164 =l loadl %.4953 + %.5165 =w ceql %.5163, %.5164 + %.5166 =w copy %.5165 + %.5167 =l copy $g_265 + %.5168 =l mul 44, 1 + %.5169 =l add %.5167, %.5168 + %.5170 =l copy %.5169 + %.5171 =w loadsw %.5170 + %.5172 =w copy %.5171 + %.5173 =w call $safe_mod_func_uint8_t_u_u(w %.5166, w %.5172) + %.5174 =l loadl %.2128 + %.5175 =l loadl %.5174 + %.5176 =w loadsw %.5175 + %.5177 =l extsw 0 + %.5178 =w ceql %.5177, $g_296 + %.5179 =l copy $g_265 + %.5180 =l mul 0, 1 + %.5181 =l add %.5179, %.5180 + %.5182 =l copy %.5181 + %.5183 =w loadub %.5182 + %.5184 =w extub %.5183 + %.5185 =w or %.5178, %.5184 + %.5186 =l extsw %.5185 + %.5187 =l copy $g_185 + %.5188 =l mul 24, 1 + %.5189 =l add %.5187, %.5188 + %.5190 =l copy %.5189 + %.5191 =l loadl %.5190 + %.5192 =w copy %.5191 + %.5193 =w loadub $g_57 + %.5194 =l loadl $g_88 + %.5195 =l loadl %.5194 + %.5196 =l loadl %.5195 + %.5197 =w loadsw %.5196 + %.5198 =l copy $g_185 + %.5199 =l mul 0, 1 + %.5200 =l add %.5198, %.5199 + %.5201 =l copy %.5200 + %.5202 =w loadub %.5201 + %.5203 =w extub %.5202 + %.5204 =w call $safe_mod_func_int16_t_s_s(w %.5192, w %.5203) + %.5205 =w copy %.5204 + %.5206 =w loadub $g_57 + %.5207 =w call $safe_mul_func_uint8_t_u_u(w %.5205, w %.5206) + %.5208 =l copy $g_265 + %.5209 =l mul 24, 1 + %.5210 =l add %.5208, %.5209 + %.5211 =l copy %.5210 + %.5212 =l loadl %.5211 + %.5213 =w ceql %.5186, %.5212 + %.5214 =l extsw %.5213 + %.5215 =l copy $g_265 + %.5216 =l mul 8, 1 + %.5217 =l add %.5215, %.5216 + %.5218 =l copy %.5217 + %.5219 =l loadl %.5218 + %.5220 =w cnel %.5214, %.5219 + %.5221 =w copy %.5220 + %.5222 =w loadub %.6 + %.5223 
=w extub %.5222 + %.5224 =w call $safe_lshift_func_uint16_t_u_s(w %.5221, w %.5223) + %.5225 =w loaduw %.4 + %.5226 =w loadub %.6 + %.5227 =l extub %.5226 + %.5228 =l xor %.5227, 36 + %.5229 =w cnel %.5228, 0 + jnz %.5229, @logic_right.1307, @logic_join.1308 +@logic_right.1307 + %.5230 =w cnel 21158, 0 +@logic_join.1308 + %.5231 =w phi @if_true.1305 %.5229, @logic_right.1307 %.5230 + %.5232 =l loadl $g_88 + %.5233 =l loadl %.5232 + %.5234 =l loadl %.5233 + %.5235 =w loadsw %.5234 + %.5236 =w cnew %.5231, %.5235 + %.5237 =w copy %.5236 + %.5238 =w call $safe_mul_func_uint8_t_u_u(w %.5173, w %.5237) + %.5239 =l loadl $g_173 + %.5240 =w loadsw %.5239 + %.5241 =l extsw %.5240 + storel %.5241, %.4433 + %.5242 =l extsw 0 + %.5243 =l mul %.5242, 8 + %.5244 =l add %.2145, %.5243 + %.5245 =l loadl %.5244 + %.5246 =l loadl %.3883 + %.5247 =w ceql %.5245, %.5246 + %.5248 =w or %.5162, %.5247 + storew %.5248, %.4955 + %.5249 =l loadl %.2895 + %.5250 =l copy $g_185 + %.5251 =l mul 8, 1 + %.5252 =l add %.5250, %.5251 + %.5253 =l copy %.5252 + %.5254 =l loadl %.5253 + %.5255 =w loadsb %.2872 + %.5256 =w extsb %.5255 + %.5257 =l extsw 0 + %.5258 =w cnel %.3880, %.5257 + %.5259 =l copy $g_130 + %.5260 =l mul 8, 1 + %.5261 =l add %.5259, %.5260 + %.5262 =l copy %.5261 + %.5263 =w loadsh %.5262 + %.5264 =w copy %.5263 + %.5265 =l copy 39984 + %.5266 =w cultl 0, %.5265 + %.5267 =l extsw %.5266 + %.5268 =w ceql 1, %.5267 + %.5269 =w ceqw %.5268, 0 + %.5270 =l extsw %.5269 + %.5271 =w cugtl 65532, %.5270 + %.5272 =l copy $g_185 + %.5273 =l mul 48, 1 + %.5274 =l add %.5272, %.5273 + %.5275 =l copy %.5274 + %.5276 =w loadsw %.5275 + %.5277 =w xor %.5271, %.5276 + %.5278 =w copy %.5277 + %.5279 =w call $safe_div_func_uint8_t_u_u(w %.5264, w %.5278) + %.5280 =w loadub %.6 + %.5281 =w extub %.5279 + %.5282 =w extub %.5280 + %.5283 =w cugew %.5281, %.5282 + %.5284 =w loadsb %.2872 + %.5285 =w extsb %.5284 + %.5286 =w csgew %.5283, %.5285 + %.5287 =w copy %.5286 + %.5288 =w copy 27268 + %.5289 
=w call $safe_mul_func_uint16_t_u_u(w %.5287, w %.5288) + %.5290 =w extuh %.5289 + storew %.5290, %.4955 + %.5291 =w cnew %.5290, 0 + jnz %.5291, @logic_join.1316, @logic_right.1315 +@logic_right.1315 + %.5292 =w cnel 0, 0 +@logic_join.1316 + %.5293 =w phi @logic_join.1308 %.5291, @logic_right.1315 %.5292 + %.5294 =l extsw 8 + %.5295 =l mul %.5294, 40 + %.5296 =l add %.4958, %.5295 + %.5297 =l extsw 4 + %.5298 =l mul %.5297, 4 + %.5299 =l add %.5296, %.5298 + %.5300 =w loadsw %.5299 + %.5301 =w csgew %.5293, %.5300 + %.5302 =w copy %.5301 + %.5303 =l loadl $g_173 + %.5304 =w loadsw %.5303 + %.5305 =w copy %.5304 + %.5306 =w call $safe_add_func_uint32_t_u_u(w %.5302, w %.5305) + %.5307 =l extuw %.5306 + %.5308 =w ceql %.5307, 4 + %.5309 =l extsw %.5308 + %.5310 =l loadl %.4436 + %.5311 =w cugel %.5309, %.5310 + %.5312 =w copy %.5311 + %.5313 =l loadl %.4436 + %.5314 =w copy %.5313 + %.5315 =w call $safe_mul_func_uint8_t_u_u(w %.5312, w %.5314) + %.5316 =w extub %.5315 + %.5317 =w or %.5256, %.5316 + %.5318 =w copy %.5317 + %.5319 =w loaduw $g_115 + %.5320 =w cugew %.5318, %.5319 + %.5321 =l extsw 0 + %.5322 =l loadl %.2 + %.5323 =w ceql %.5321, %.5322 + %.5324 =w cnew %.5323, 0 + jnz %.5324, @logic_join.1314, @logic_right.1313 +@logic_right.1313 + %.5325 =w loadub %.6 + %.5326 =w extub %.5325 + %.5327 =w cnew %.5326, 0 +@logic_join.1314 + %.5328 =w phi @logic_join.1316 %.5324, @logic_right.1313 %.5327 + %.5329 =w copy %.5328 + %.5330 =w loaduw %.4 + %.5331 =w or %.5329, %.5330 + %.5332 =w xor %.5331, 18446744073709551615 + %.5333 =w copy %.5332 + %.5334 =w loadsb %.2872 + %.5335 =w extsb %.5334 + %.5336 =w call $safe_rshift_func_int16_t_s_s(w %.5333, w %.5335) + %.5337 =w extsh %.5336 + %.5338 =w loadsb %.2872 + %.5339 =w extsb %.5338 + %.5340 =w call $safe_sub_func_uint32_t_u_u(w %.5337, w %.5339) + %.5341 =l extuw %.5340 + %.5342 =l copy $g_185 + %.5343 =l mul 8, 1 + %.5344 =l add %.5342, %.5343 + %.5345 =l copy %.5344 + %.5346 =l loadl %.5345 + %.5347 =w ceql 
%.5341, %.5346 + %.5348 =w copy %.5347 + %.5349 =w loaduw %.2146 + %.5350 =w copy %.5349 + %.5351 =w call $safe_mul_func_uint8_t_u_u(w %.5348, w %.5350) + %.5352 =l extub %.5351 + %.5353 =l loadl $g_82 + %.5354 =w csgel %.5352, %.5353 + %.5355 =w copy %.5354 + %.5356 =l extsw 8 + %.5357 =l mul %.5356, 40 + %.5358 =l add %.4958, %.5357 + %.5359 =l extsw 4 + %.5360 =l mul %.5359, 4 + %.5361 =l add %.5358, %.5360 + %.5362 =w loadsw %.5361 + %.5363 =w copy %.5362 + %.5364 =w call $safe_div_func_uint8_t_u_u(w %.5355, w %.5363) + %.5365 =l extub %.5364 + %.5366 =l copy $g_185 + %.5367 =l mul 24, 1 + %.5368 =l add %.5366, %.5367 + %.5369 =l copy %.5368 + %.5370 =l loadl %.5369 + %.5371 =w ceql %.5365, %.5370 + %.5372 =l extsw %.5371 + %.5373 =w cslel %.5372, 4224946571 + %.5374 =w copy %.5373 + %.5375 =l copy $g_185 + %.5376 =l mul 36, 1 + %.5377 =l add %.5375, %.5376 + %.5378 =l copy %.5377 + %.5379 =w loaduw %.5378 + %.5380 =w culew %.5374, %.5379 + %.5381 =w copy %.5380 + %.5382 =w loaduw %.4 + %.5383 =w ceqw %.5381, %.5382 + %.5384 =w loadub %.6 + %.5385 =w extub %.5384 + %.5386 =w ceqw %.5383, %.5385 + %.5387 =w copy %.5386 + %.5388 =w loadub %.6 + %.5389 =w call $safe_div_func_uint8_t_u_u(w %.5387, w %.5388) + %.5390 =l copy $g_130 + %.5391 =l mul 4, 1 + %.5392 =l add %.5390, %.5391 + %.5393 =l copy %.5392 + %.5394 =w loaduw %.5393 + %.5395 =w copy %.5394 + %.5396 =w call $safe_mul_func_uint8_t_u_u(w %.5389, w %.5395) + %.5397 =l extub %.5396 + %.5398 =l loadl %.5147 + storel %.5397, %.5398 + %.5399 =w copy 41088 + %.5400 =w loadsh $g_81 + %.5401 =w copy %.5400 + %.5402 =w call $safe_mul_func_uint16_t_u_u(w %.5399, w %.5401) + %.5403 =w extuh %.5402 + %.5404 =w cnew %.5403, 0 + jnz %.5404, @logic_join.1312, @logic_right.1311 +@logic_right.1311 + %.5405 =w loadsw %.3885 + %.5406 =w cnew %.5405, 0 +@logic_join.1312 + %.5407 =w phi @logic_join.1314 %.5404, @logic_right.1311 %.5406 + %.5408 =w cnew %.5407, 0 + jnz %.5408, @logic_right.1309, @logic_join.1310 
+@logic_right.1309 + %.5409 =w cnel 0, 0 +@logic_join.1310 + %.5410 =w phi @logic_join.1312 %.5408, @logic_right.1309 %.5409 + %.5411 =l loadl $g_363 + %.5412 =w ceql %.5249, %.5411 + %.5413 =w loadub %.6 + %.5414 =w extub %.5413 + %.5415 =w csgew %.5412, %.5414 + %.5416 =w copy %.5415 + %.5417 =w loadsb %.2872 + %.5418 =w copy %.5417 + %.5419 =w call $safe_add_func_uint8_t_u_u(w %.5416, w %.5418) + %.5420 =l extub %.5419 + %.5421 =w csltl %.5420, 1 + %.5422 =l loadl %.5149 + %.5423 =l loadl %.2893 + storel %.5422, %.5423 + %.5424 =l loadl $g_88 + %.5425 =l loadl %.5424 + %.5426 =l loadl %.5425 + ret %.5426 +@if_false.1306 + %.5427 =l loadl $g_38 + %.5428 =l loadl %.5427 + ret %.5428 +@if_join.1317 + jmp @if_join.1318 +@if_false.1287 + %.5429 =w loadub %.2023 + %.5430 =w sub %.5429, 1 + storeb %.5430, %.2023 +@if_join.1318 + jmp @if_join.1319 +@if_false.1259 + %.5431 =l loadl %.2 + ret %.5431 +@if_join.1319 + %.5432 =w sub 0, 22 + %.5433 =l copy $g_265 + %.5434 =l mul 48, 1 + %.5435 =l add %.5433, %.5434 + %.5436 =l copy %.5435 + storew %.5432, %.5436 +@for_cond.1320 + %.5437 =l copy $g_265 + %.5438 =l mul 48, 1 + %.5439 =l add %.5437, %.5438 + %.5440 =l copy %.5439 + %.5441 =w loadsw %.5440 + %.5442 =w sub 0, 16 + %.5443 =w ceqw %.5441, %.5442 + jnz %.5443, @for_body.1321, @for_join.1323 +@for_body.1321 + %.5445 =l add %.5444, 0 + %.5446 =l extsw 0 + %.5447 =l sub %.5446, 9 + %.5448 =w copy %.5447 + storew %.5448, %.5445 + %.5450 =l add %.5449, 0 + %.5451 =w copy 8 + storeh %.5451, %.5450 + %.5453 =l add %.5452, 0 + %.5454 =l extsw 0 + %.5455 =l copy %.5454 + storel %.5455, %.5453 + %.5456 =w loadsw %.2886 + %.5457 =w cnew %.5456, 0 + jnz %.5457, @if_true.1324, @if_false.1325 +@if_true.1324 + jmp @lbl_234.1237 +@if_false.1325 + %.5458 =l extsw 0 + storel %.5458, $g_80 +@for_cond.1326 + %.5459 =l loadl $g_80 + %.5460 =l extsw 9 + %.5461 =w csltl %.5459, %.5460 + jnz %.5461, @for_body.1327, @for_join.1329 +@for_body.1327 + %.5462 =l loadl %.2 + ret %.5462 
+@for_cont.1328 + %.5463 =l loadl $g_80 + %.5464 =l add %.5463, 1 + storel %.5464, $g_80 + jmp @for_cond.1326 +@for_join.1329 + %.5465 =l extsw 0 + %.5466 =l extsw 0 + %.5467 =l mul %.5466, 24 + %.5468 =l add %.2897, %.5467 + %.5469 =l extsw 0 + %.5470 =l mul %.5469, 24 + %.5471 =l add %.5468, %.5470 + %.5472 =l extsw 1 + %.5473 =l mul %.5472, 8 + %.5474 =l add %.5471, %.5473 + %.5475 =l loadl %.5474 + %.5476 =w ceql %.5465, %.5475 + %.5477 =w copy %.5476 + %.5478 =w copy 48769 + %.5479 =w call $safe_add_func_uint16_t_u_u(w %.5477, w %.5478) + %.5480 =l extuh %.5479 + %.5481 =l loadl $g_82 + %.5482 =w csgel %.5480, %.5481 + %.5483 =l loadl $g_173 + storew %.5482, %.5483 + %.5484 =l loadl $g_173 + %.5485 =w loadsw %.5484 + %.5486 =l copy $g_130 + %.5487 =l mul 16, 1 + %.5488 =l add %.5486, %.5487 + %.5489 =l copy %.5488 + %.5490 =w loaduw %.5489 + %.5491 =l extuw %.5490 + %.5492 =l and %.5491, 12268102678362359027 + %.5493 =l extsw 1 + %.5494 =l mul %.5493, 48 + %.5495 =l add %.2935, %.5494 + %.5496 =l extsw 5 + %.5497 =l mul %.5496, 8 + %.5498 =l add %.5495, %.5497 + %.5499 =l extsw 4 + %.5500 =l mul %.5499, 1 + %.5501 =l add %.5498, %.5500 + %.5502 =w loadsb %.5501 + %.5503 =l extsb %.5502 + %.5504 =l extsw 0 + %.5505 =l extsw 0 + %.5506 =l mul %.5505, 8 + %.5507 =l add %.2145, %.5506 + %.5508 =l loadl %.5507 + %.5509 =w cnel %.5504, %.5508 + %.5510 =w loadsw %.5444 + %.5511 =w cnew %.5510, 0 + jnz %.5511, @logic_right.1330, @logic_join.1331 +@logic_right.1330 + %.5512 =w loaduh %.5449 + %.5513 =l loadl %.5452 + %.5514 =l extsw 0 + %.5515 =w cnel %.5513, %.5514 + %.5516 =w cnew %.5515, 0 +@logic_join.1331 + %.5517 =w phi @for_join.1329 %.5511, @logic_right.1330 %.5516 + %.5518 =l loadl %.2128 + %.5519 =l loadl %.5518 + %.5520 =w loadsw %.5519 + %.5521 =l extsw %.5520 + %.5522 =l and %.5521, 2796228265 + %.5523 =w loadsw %.2149 + %.5524 =l extsw %.5523 + %.5525 =w ceql %.5522, %.5524 + %.5526 =w ceqw %.5525, 0 + %.5527 =w copy %.5526 + %.5528 =w loaduw $g_115 + 
%.5529 =w copy %.5528 + %.5530 =w call $safe_mod_func_int16_t_s_s(w %.5527, w %.5529) + %.5531 =l loadl %.2026 + %.5532 =l extsw 0 + %.5533 =w cnel %.5531, %.5532 + %.5534 =w copy %.5533 + %.5535 =w call $safe_rshift_func_uint16_t_u_s(w %.5534, w 11) + %.5536 =w extuh %.5535 + %.5537 =w loaduw %.4 + %.5538 =w xor %.5536, %.5537 + %.5539 =w loadsb %.2872 + %.5540 =l extsb %.5539 + %.5541 =w csgtl 0, %.5540 + %.5542 =w cnew %.5509, %.5541 + %.5543 =l copy 6 + %.5544 =l call $safe_mod_func_uint64_t_u_u(l %.5503, l %.5543) + %.5545 =l copy $g_185 + %.5546 =l mul 16, 1 + %.5547 =l add %.5545, %.5546 + %.5548 =l copy %.5547 + %.5549 =w loadsw %.5548 + %.5550 =l extsw %.5549 + %.5551 =w cultl %.5544, %.5550 + %.5552 =l extsw %.5551 + %.5553 =w cugel %.5492, %.5552 + %.5554 =w copy %.5553 + %.5555 =w call $safe_rshift_func_uint8_t_u_s(w %.5554, w 3) + %.5556 =w extub %.5555 + %.5557 =w and %.5485, %.5556 + storew %.5557, %.5484 +@for_cont.1322 + %.5558 =l copy $g_265 + %.5559 =l mul 48, 1 + %.5560 =l add %.5558, %.5559 + %.5561 =l copy %.5560 + %.5562 =w loadsw %.5561 + %.5563 =w add %.5562, 1 + storew %.5563, %.5561 + jmp @for_cond.1320 +@for_join.1323 + %.5564 =l extsw 0 + %.5565 =l sub %.5564, 10 + %.5566 =w loadub %.6 + %.5567 =w extub %.5566 + %.5568 =w cnew %.5567, 0 + jnz %.5568, @logic_right.1332, @logic_join.1333 +@logic_right.1332 + %.5569 =l loadl $g_394 + %.5570 =l extsw 0 + %.5571 =w ceql %.5569, %.5570 + %.5572 =l extsw %.5571 + %.5573 =l loadl %.3468 + storel %.5572, %.5573 + %.5574 =l copy %.5572 + %.5575 =l extsw 0 + %.5576 =l sub %.5575, 9 + %.5577 =l copy %.5576 + %.5578 =l extsw 0 + %.5579 =w cnel %.5578, %.2143 + %.5580 =l extsw %.5579 + storel %.5580, $g_399 + %.5581 =l and %.5577, %.5580 + %.5582 =w ceql %.5574, %.5581 + %.5583 =l extsw 0 + %.5584 =l sub %.5583, 3 + %.5585 =w loaduw %.4 + %.5586 =l extuw %.5585 + %.5587 =w csgel %.5584, %.5586 + %.5588 =w ceqw %.5582, %.5587 + %.5589 =l extsw %.5588 + %.5590 =w loaduw %.4 + %.5591 =l extuw %.5590 + 
%.5592 =l call $safe_add_func_int64_t_s_s(l %.5589, l %.5591) + %.5593 =w cnel %.5592, 0 +@logic_join.1333 + %.5594 =w phi @for_join.1323 %.5568, @logic_right.1332 %.5593 + %.5595 =w copy %.5594 + %.5596 =l loadl %.2005 + storeb %.5595, %.5596 + %.5597 =l extsb %.5595 + %.5598 =w csgel %.5565, %.5597 + %.5599 =l loadl $g_173 + storew %.5598, %.5599 +@for_cont.1254 + %.5600 =l copy $g_130 + %.5601 =l mul 4, 1 + %.5602 =l add %.5600, %.5601 + %.5603 =l copy %.5602 + %.5604 =w loaduw %.5603 + %.5605 =w add %.5604, 1 + storew %.5605, %.5603 + jmp @for_cond.1252 +@for_join.1255 + %.5606 =w copy 0 + storeb %.5606, $g_46 +@for_cond.1334 + %.5607 =w loadub $g_46 + %.5608 =w extub %.5607 + %.5609 =w cslew %.5608, 5 + jnz %.5609, @for_body.1335, @for_join.1337 +@for_body.1335 + %.5611 =l add %.5610, 0 + %.5612 =w copy 27126 + storeh %.5612, %.5611 + %.5614 =l add %.5613, 0 + %.5615 =l extsw 0 + %.5616 =l copy %.5615 + storel %.5616, %.5614 + %.5618 =l add %.5617, 0 + %.5619 =l extsw 0 + %.5620 =l copy %.5619 + storel %.5620, %.5618 + %.5622 =l add %.5621, 0 + storel $g_425, %.5622 + %.5623 =l add %.5621, 8 + storel $g_425, %.5623 + %.5624 =l add %.5621, 16 + storel $g_425, %.5624 + %.5625 =l add %.5621, 24 + storel $g_425, %.5625 + %.5626 =l add %.5621, 32 + storel $g_425, %.5626 + %.5627 =l add %.5621, 40 + storel $g_425, %.5627 + %.5628 =l add %.5621, 48 + storel $g_425, %.5628 + %.5630 =l add %.5629, 0 + storel $g_58, %.5630 + %.5632 =l add %.5631, 0 + %.5633 =l extsw 2 + %.5634 =l mul %.5633, 320 + %.5635 =l add %.7, %.5634 + %.5636 =l extsw 2 + %.5637 =l mul %.5636, 64 + %.5638 =l add %.5635, %.5637 + %.5639 =l extsw 1 + %.5640 =l mul %.5639, 8 + %.5641 =l add %.5638, %.5640 + storel %.5641, %.5632 + %.5642 =l add %.5631, 8 + %.5643 =l extsw 2 + %.5644 =l mul %.5643, 320 + %.5645 =l add %.7, %.5644 + %.5646 =l extsw 2 + %.5647 =l mul %.5646, 64 + %.5648 =l add %.5645, %.5647 + %.5649 =l extsw 1 + %.5650 =l mul %.5649, 8 + %.5651 =l add %.5648, %.5650 + storel %.5651, 
%.5642 + %.5652 =l add %.5631, 16 + %.5653 =l extsw 2 + %.5654 =l mul %.5653, 320 + %.5655 =l add %.7, %.5654 + %.5656 =l extsw 2 + %.5657 =l mul %.5656, 64 + %.5658 =l add %.5655, %.5657 + %.5659 =l extsw 1 + %.5660 =l mul %.5659, 8 + %.5661 =l add %.5658, %.5660 + storel %.5661, %.5652 + %.5662 =l add %.5631, 24 + %.5663 =l extsw 2 + %.5664 =l mul %.5663, 320 + %.5665 =l add %.7, %.5664 + %.5666 =l extsw 2 + %.5667 =l mul %.5666, 64 + %.5668 =l add %.5665, %.5667 + %.5669 =l extsw 1 + %.5670 =l mul %.5669, 8 + %.5671 =l add %.5668, %.5670 + storel %.5671, %.5662 + %.5672 =l add %.5631, 32 + %.5673 =l extsw 2 + %.5674 =l mul %.5673, 320 + %.5675 =l add %.7, %.5674 + %.5676 =l extsw 2 + %.5677 =l mul %.5676, 64 + %.5678 =l add %.5675, %.5677 + %.5679 =l extsw 1 + %.5680 =l mul %.5679, 8 + %.5681 =l add %.5678, %.5680 + storel %.5681, %.5672 + %.5682 =l add %.5631, 40 + %.5683 =l extsw 2 + %.5684 =l mul %.5683, 320 + %.5685 =l add %.7, %.5684 + %.5686 =l extsw 2 + %.5687 =l mul %.5686, 64 + %.5688 =l add %.5685, %.5687 + %.5689 =l extsw 1 + %.5690 =l mul %.5689, 8 + %.5691 =l add %.5688, %.5690 + storel %.5691, %.5682 + %.5692 =l add %.5631, 48 + %.5693 =l extsw 2 + %.5694 =l mul %.5693, 320 + %.5695 =l add %.7, %.5694 + %.5696 =l extsw 2 + %.5697 =l mul %.5696, 64 + %.5698 =l add %.5695, %.5697 + %.5699 =l extsw 1 + %.5700 =l mul %.5699, 8 + %.5701 =l add %.5698, %.5700 + storel %.5701, %.5692 + %.5702 =l add %.5631, 56 + %.5703 =l extsw 2 + %.5704 =l mul %.5703, 320 + %.5705 =l add %.7, %.5704 + %.5706 =l extsw 2 + %.5707 =l mul %.5706, 64 + %.5708 =l add %.5705, %.5707 + %.5709 =l extsw 1 + %.5710 =l mul %.5709, 8 + %.5711 =l add %.5708, %.5710 + storel %.5711, %.5702 + %.5712 =l add %.5631, 64 + %.5713 =l extsw 2 + %.5714 =l mul %.5713, 320 + %.5715 =l add %.7, %.5714 + %.5716 =l extsw 2 + %.5717 =l mul %.5716, 64 + %.5718 =l add %.5715, %.5717 + %.5719 =l extsw 1 + %.5720 =l mul %.5719, 8 + %.5721 =l add %.5718, %.5720 + storel %.5721, %.5712 + %.5722 =l add 
%.5631, 72 + %.5723 =l extsw 2 + %.5724 =l mul %.5723, 320 + %.5725 =l add %.7, %.5724 + %.5726 =l extsw 2 + %.5727 =l mul %.5726, 64 + %.5728 =l add %.5725, %.5727 + %.5729 =l extsw 1 + %.5730 =l mul %.5729, 8 + %.5731 =l add %.5728, %.5730 + storel %.5731, %.5722 + %.5733 =l add %.5732, 0 + %.5734 =w copy 3 + storew %.5734, %.5733 + %.5736 =l add %.5735, 0 + %.5737 =w copy 3172288781 + storew %.5737, %.5736 + %.5738 =l add %.5735, 4 + %.5739 =w copy 18446744073709551615 + storew %.5739, %.5738 + %.5740 =l add %.5735, 8 + %.5741 =l extsw 0 + %.5742 =l sub %.5741, 3 + %.5743 =w copy %.5742 + storeh %.5743, %.5740 + %.5744 =l add %.5735, 10 + storeh 0, %.5744 + %.5745 =l add %.5735, 12 + %.5746 =w copy 2 + storew %.5746, %.5745 + %.5747 =l add %.5735, 16 + %.5748 =w copy 1389690011 + storew %.5748, %.5747 + %.5750 =l add %.5749, 0 + %.5751 =l extsw 2 + %.5752 =l mul %.5751, 360 + %.5753 =l add %.250, %.5752 + %.5754 =l extsw 1 + %.5755 =l mul %.5754, 120 + %.5756 =l add %.5753, %.5755 + %.5757 =l extsw 1 + %.5758 =l mul %.5757, 20 + %.5759 =l add %.5756, %.5758 + %.5760 =l copy %.5759 + %.5761 =l mul 12, 1 + %.5762 =l add %.5760, %.5761 + %.5763 =l copy %.5762 + storel %.5763, %.5750 + %.5764 =l add %.5749, 8 + %.5765 =l extsw 2 + %.5766 =l mul %.5765, 360 + %.5767 =l add %.250, %.5766 + %.5768 =l extsw 1 + %.5769 =l mul %.5768, 120 + %.5770 =l add %.5767, %.5769 + %.5771 =l extsw 1 + %.5772 =l mul %.5771, 20 + %.5773 =l add %.5770, %.5772 + %.5774 =l copy %.5773 + %.5775 =l mul 12, 1 + %.5776 =l add %.5774, %.5775 + %.5777 =l copy %.5776 + storel %.5777, %.5764 + %.5778 =l add %.5749, 16 + %.5779 =l copy %.5735 + %.5780 =l mul 12, 1 + %.5781 =l add %.5779, %.5780 + %.5782 =l copy %.5781 + storel %.5782, %.5778 + %.5783 =l add %.5749, 24 + %.5784 =l copy %.5735 + %.5785 =l mul 12, 1 + %.5786 =l add %.5784, %.5785 + %.5787 =l copy %.5786 + storel %.5787, %.5783 + %.5788 =l add %.5749, 32 + %.5789 =l extsw 2 + %.5790 =l mul %.5789, 360 + %.5791 =l add %.250, %.5790 + 
%.5792 =l extsw 1 + %.5793 =l mul %.5792, 120 + %.5794 =l add %.5791, %.5793 + %.5795 =l extsw 1 + %.5796 =l mul %.5795, 20 + %.5797 =l add %.5794, %.5796 + %.5798 =l copy %.5797 + %.5799 =l mul 12, 1 + %.5800 =l add %.5798, %.5799 + %.5801 =l copy %.5800 + storel %.5801, %.5788 + %.5802 =l add %.5749, 40 + %.5803 =l copy $g_130 + %.5804 =l mul 12, 1 + %.5805 =l add %.5803, %.5804 + %.5806 =l copy %.5805 + storel %.5806, %.5802 + %.5807 =l add %.5749, 48 + %.5808 =l extsw 0 + %.5809 =l copy %.5808 + storel %.5809, %.5807 + %.5810 =l add %.5749, 56 + %.5811 =l copy $g_130 + %.5812 =l mul 12, 1 + %.5813 =l add %.5811, %.5812 + %.5814 =l copy %.5813 + storel %.5814, %.5810 + %.5815 =l add %.5749, 64 + %.5816 =l extsw 0 + %.5817 =l copy %.5816 + storel %.5817, %.5815 + %.5818 =l add %.5749, 72 + %.5819 =l copy $g_130 + %.5820 =l mul 12, 1 + %.5821 =l add %.5819, %.5820 + %.5822 =l copy %.5821 + storel %.5822, %.5818 + %.5823 =l add %.5749, 80 + %.5824 =l extsw 2 + %.5825 =l mul %.5824, 360 + %.5826 =l add %.250, %.5825 + %.5827 =l extsw 1 + %.5828 =l mul %.5827, 120 + %.5829 =l add %.5826, %.5828 + %.5830 =l extsw 1 + %.5831 =l mul %.5830, 20 + %.5832 =l add %.5829, %.5831 + %.5833 =l copy %.5832 + %.5834 =l mul 12, 1 + %.5835 =l add %.5833, %.5834 + %.5836 =l copy %.5835 + storel %.5836, %.5823 + %.5837 =l add %.5749, 88 + %.5838 =l copy %.5735 + %.5839 =l mul 12, 1 + %.5840 =l add %.5838, %.5839 + %.5841 =l copy %.5840 + storel %.5841, %.5837 + %.5842 =l add %.5749, 96 + %.5843 =l copy %.5735 + %.5844 =l mul 12, 1 + %.5845 =l add %.5843, %.5844 + %.5846 =l copy %.5845 + storel %.5846, %.5842 + %.5847 =l add %.5749, 104 + %.5848 =l extsw 2 + %.5849 =l mul %.5848, 360 + %.5850 =l add %.250, %.5849 + %.5851 =l extsw 1 + %.5852 =l mul %.5851, 120 + %.5853 =l add %.5850, %.5852 + %.5854 =l extsw 1 + %.5855 =l mul %.5854, 20 + %.5856 =l add %.5853, %.5855 + %.5857 =l copy %.5856 + %.5858 =l mul 12, 1 + %.5859 =l add %.5857, %.5858 + %.5860 =l copy %.5859 + storel %.5860, 
%.5847 + %.5861 =l add %.5749, 112 + %.5862 =l extsw 2 + %.5863 =l mul %.5862, 360 + %.5864 =l add %.250, %.5863 + %.5865 =l extsw 1 + %.5866 =l mul %.5865, 120 + %.5867 =l add %.5864, %.5866 + %.5868 =l extsw 1 + %.5869 =l mul %.5868, 20 + %.5870 =l add %.5867, %.5869 + %.5871 =l copy %.5870 + %.5872 =l mul 12, 1 + %.5873 =l add %.5871, %.5872 + %.5874 =l copy %.5873 + storel %.5874, %.5861 + %.5875 =l add %.5749, 120 + %.5876 =l extsw 0 + %.5877 =l copy %.5876 + storel %.5877, %.5875 + %.5878 =l add %.5749, 128 + %.5879 =l extsw 0 + %.5880 =l copy %.5879 + storel %.5880, %.5878 + %.5881 =l add %.5749, 136 + %.5882 =l extsw 0 + %.5883 =l copy %.5882 + storel %.5883, %.5881 + %.5884 =l add %.5749, 144 + %.5885 =l extsw 0 + %.5886 =l copy %.5885 + storel %.5886, %.5884 + %.5887 =l add %.5749, 152 + %.5888 =l extsw 0 + %.5889 =l copy %.5888 + storel %.5889, %.5887 + %.5891 =l add %.5890, 0 + storel %.2042, %.5891 + %.5893 =l add %.5892, 0 + %.5894 =w copy 1953940215 + storew %.5894, %.5893 + %.5897 =l copy $g_130 + %.5898 =l mul 12, 1 + %.5899 =l add %.5897, %.5898 + %.5900 =l copy %.5899 + %.5901 =w loadsw %.5900 + %.5902 =w loaduh %.5610 + %.5903 =w copy %.5902 + %.5904 =w loadub %.6 + %.5905 =w loadsh $g_81 + %.5906 =w copy %.5905 + %.5907 =w loadsw %.2125 + storel %.4, $g_422 + %.5908 =w cnel %.4, %.4 + %.5909 =w copy %.5908 + %.5910 =l copy $g_185 + %.5911 =l mul 16, 1 + %.5912 =l add %.5910, %.5911 + %.5913 =l copy %.5912 + %.5914 =w loadsw %.5913 + %.5915 =w copy %.5914 + %.5916 =w call $safe_add_func_uint32_t_u_u(w %.5909, w %.5915) + %.5917 =w copy %.5916 + %.5918 =w loaduh %.5610 + %.5919 =w copy %.5918 + %.5920 =w call $safe_add_func_uint8_t_u_u(w %.5917, w %.5919) + %.5921 =w copy %.5920 + %.5922 =l loadl %.2128 + %.5923 =l loadl %.5922 + %.5924 =w loadsw %.5923 + %.5925 =w copy %.5924 + %.5926 =w call $safe_add_func_int8_t_s_s(w %.5921, w %.5925) + %.5927 =w copy %.5926 + %.5928 =l copy $g_185 + %.5929 =l mul 36, 1 + %.5930 =l add %.5928, %.5929 + %.5931 
=l copy %.5930 + %.5932 =w loaduw %.5931 + %.5933 =w copy %.5932 + %.5934 =w call $safe_div_func_uint8_t_u_u(w %.5927, w %.5933) + %.5935 =w copy %.5934 + %.5936 =w loaduw %.4 + %.5937 =w copy %.5936 + %.5938 =w call $safe_div_func_int8_t_s_s(w %.5935, w %.5937) + %.5939 =w extsb %.5938 + %.5940 =w or %.5907, %.5939 + storew %.5940, %.2125 + %.5941 =w copy %.5940 + %.5942 =w call $safe_div_func_uint16_t_u_u(w %.5906, w %.5941) + %.5943 =l extuh %.5942 + %.5944 =w csgtl %.5943, 3030009979941848488 + %.5945 =w copy %.5944 + %.5946 =w loaduw %.4 + %.5947 =w copy %.5946 + %.5948 =w call $safe_mod_func_int8_t_s_s(w %.5945, w %.5947) + %.5949 =l extsb %.5948 + %.5950 =w csltl 4182057465624465012, %.5949 + %.5951 =l extsw %.5950 + %.5952 =w csltl 0, %.5951 + %.5953 =l extsw %.5952 + %.5954 =l copy 16470644504123542939 + %.5955 =l call $safe_div_func_int64_t_s_s(l %.5953, l %.5954) + %.5956 =l copy $g_265 + %.5957 =l mul 36, 1 + %.5958 =l add %.5956, %.5957 + %.5959 =l copy %.5958 + %.5960 =w loaduw %.5959 + %.5961 =l extuw %.5960 + %.5962 =w cnel %.5955, %.5961 + %.5963 =w cnew %.5962, 0 + jnz %.5963, @logic_join.1343, @logic_right.1342 +@logic_right.1342 + %.5964 =w loadsw %.2030 + %.5965 =w cnew %.5964, 0 +@logic_join.1343 + %.5966 =w phi @for_body.1335 %.5963, @logic_right.1342 %.5965 + %.5967 =w copy %.5966 + %.5968 =w call $safe_div_func_uint8_t_u_u(w %.5903, w %.5967) + %.5969 =w extub %.5968 + %.5970 =w and %.5901, %.5969 + %.5971 =w cnew %.5970, 0 + jnz %.5971, @logic_join.1341, @logic_right.1340 +@logic_right.1340 + %.5972 =w cnel 15144162022194725640, 0 +@logic_join.1341 + %.5973 =w phi @logic_join.1343 %.5971, @logic_right.1340 %.5972 + %.5974 =l extsw %.5973 + %.5975 =l loadl %.5629 + storel %.5974, %.5975 + %.5976 =w cnel %.5974, 0 + jnz %.5976, @logic_right.1338, @logic_join.1339 +@logic_right.1338 + %.5977 =w loadub %.6 + %.5978 =w extub %.5977 + %.5979 =w cnew %.5978, 0 +@logic_join.1339 + %.5980 =w phi @logic_join.1341 %.5976, @logic_right.1338 %.5979 + 
%.5981 =w cnew %.5980, 0 + jnz %.5981, @if_true.1344, @if_false.1345 +@if_true.1344 + %.5983 =l add %.5982, 0 + %.5984 =l extsw 3 + %.5985 =l mul %.5984, 320 + %.5986 =l add %.7, %.5985 + %.5987 =l extsw 3 + %.5988 =l mul %.5987, 64 + %.5989 =l add %.5986, %.5988 + %.5990 =l extsw 1 + %.5991 =l mul %.5990, 8 + %.5992 =l add %.5989, %.5991 + storel %.5992, %.5983 + %.5994 =l add %.5993, 0 + storel $g_81, %.5994 + %.5997 =l add %.5996, 0 + %.5998 =w copy 0 + storeb %.5998, %.5997 + %.6000 =l add %.5999, 0 + %.6001 =l extsw 2 + %.6002 =l mul %.6001, 360 + %.6003 =l add %.250, %.6002 + %.6004 =l extsw 1 + %.6005 =l mul %.6004, 120 + %.6006 =l add %.6003, %.6005 + %.6007 =l extsw 1 + %.6008 =l mul %.6007, 20 + %.6009 =l add %.6006, %.6008 + %.6010 =l copy %.6009 + %.6011 =l mul 8, 1 + %.6012 =l add %.6010, %.6011 + %.6013 =l copy %.6012 + storel %.6013, %.6000 + %.6014 =l add %.5999, 8 + %.6015 =l extsw 2 + %.6016 =l mul %.6015, 360 + %.6017 =l add %.250, %.6016 + %.6018 =l extsw 1 + %.6019 =l mul %.6018, 120 + %.6020 =l add %.6017, %.6019 + %.6021 =l extsw 1 + %.6022 =l mul %.6021, 20 + %.6023 =l add %.6020, %.6022 + %.6024 =l copy %.6023 + %.6025 =l mul 8, 1 + %.6026 =l add %.6024, %.6025 + %.6027 =l copy %.6026 + storel %.6027, %.6014 + %.6028 =l add %.5999, 16 + %.6029 =l extsw 2 + %.6030 =l mul %.6029, 360 + %.6031 =l add %.250, %.6030 + %.6032 =l extsw 1 + %.6033 =l mul %.6032, 120 + %.6034 =l add %.6031, %.6033 + %.6035 =l extsw 1 + %.6036 =l mul %.6035, 20 + %.6037 =l add %.6034, %.6036 + %.6038 =l copy %.6037 + %.6039 =l mul 8, 1 + %.6040 =l add %.6038, %.6039 + %.6041 =l copy %.6040 + storel %.6041, %.6028 + %.6042 =l add %.5999, 24 + %.6043 =l extsw 2 + %.6044 =l mul %.6043, 360 + %.6045 =l add %.250, %.6044 + %.6046 =l extsw 1 + %.6047 =l mul %.6046, 120 + %.6048 =l add %.6045, %.6047 + %.6049 =l extsw 1 + %.6050 =l mul %.6049, 20 + %.6051 =l add %.6048, %.6050 + %.6052 =l copy %.6051 + %.6053 =l mul 8, 1 + %.6054 =l add %.6052, %.6053 + %.6055 =l copy 
%.6054 + storel %.6055, %.6042 + %.6056 =l add %.5999, 32 + %.6057 =l extsw 2 + %.6058 =l mul %.6057, 360 + %.6059 =l add %.250, %.6058 + %.6060 =l extsw 1 + %.6061 =l mul %.6060, 120 + %.6062 =l add %.6059, %.6061 + %.6063 =l extsw 1 + %.6064 =l mul %.6063, 20 + %.6065 =l add %.6062, %.6064 + %.6066 =l copy %.6065 + %.6067 =l mul 8, 1 + %.6068 =l add %.6066, %.6067 + %.6069 =l copy %.6068 + storel %.6069, %.6056 + %.6070 =l add %.5999, 40 + %.6071 =l extsw 2 + %.6072 =l mul %.6071, 360 + %.6073 =l add %.250, %.6072 + %.6074 =l extsw 1 + %.6075 =l mul %.6074, 120 + %.6076 =l add %.6073, %.6075 + %.6077 =l extsw 1 + %.6078 =l mul %.6077, 20 + %.6079 =l add %.6076, %.6078 + %.6080 =l copy %.6079 + %.6081 =l mul 8, 1 + %.6082 =l add %.6080, %.6081 + %.6083 =l copy %.6082 + storel %.6083, %.6070 + %.6084 =l add %.5999, 48 + %.6085 =l extsw 2 + %.6086 =l mul %.6085, 360 + %.6087 =l add %.250, %.6086 + %.6088 =l extsw 1 + %.6089 =l mul %.6088, 120 + %.6090 =l add %.6087, %.6089 + %.6091 =l extsw 1 + %.6092 =l mul %.6091, 20 + %.6093 =l add %.6090, %.6092 + %.6094 =l copy %.6093 + %.6095 =l mul 8, 1 + %.6096 =l add %.6094, %.6095 + %.6097 =l copy %.6096 + storel %.6097, %.6084 + %.6098 =l add %.5999, 56 + %.6099 =l extsw 2 + %.6100 =l mul %.6099, 360 + %.6101 =l add %.250, %.6100 + %.6102 =l extsw 1 + %.6103 =l mul %.6102, 120 + %.6104 =l add %.6101, %.6103 + %.6105 =l extsw 1 + %.6106 =l mul %.6105, 20 + %.6107 =l add %.6104, %.6106 + %.6108 =l copy %.6107 + %.6109 =l mul 8, 1 + %.6110 =l add %.6108, %.6109 + %.6111 =l copy %.6110 + storel %.6111, %.6098 + %.6112 =l add %.5999, 64 + %.6113 =l extsw 2 + %.6114 =l mul %.6113, 360 + %.6115 =l add %.250, %.6114 + %.6116 =l extsw 1 + %.6117 =l mul %.6116, 120 + %.6118 =l add %.6115, %.6117 + %.6119 =l extsw 1 + %.6120 =l mul %.6119, 20 + %.6121 =l add %.6118, %.6120 + %.6122 =l copy %.6121 + %.6123 =l mul 8, 1 + %.6124 =l add %.6122, %.6123 + %.6125 =l copy %.6124 + storel %.6125, %.6112 + %.6127 =l add %.6126, 0 + %.6128 =l 
copy $g_518 + %.6129 =l mul 44, 1 + %.6130 =l add %.6128, %.6129 + %.6131 =l copy %.6130 + storel %.6131, %.6127 + storew 0, %.6132 +@for_cond.1346 + %.6133 =w loadsw %.6132 + %.6134 =w csltw %.6133, 3 + jnz %.6134, @for_body.1347, @for_join.1349 +@for_body.1347 + %.6135 =w copy 1 + %.6136 =w loadsw %.6132 + %.6137 =l extsw %.6136 + %.6138 =l mul %.6137, 4 + %.6139 =l add %.5995, %.6138 + storew %.6135, %.6139 +@for_cont.1348 + %.6140 =w loadsw %.6132 + %.6141 =w add %.6140, 1 + storew %.6141, %.6132 + jmp @for_cond.1346 +@for_join.1349 + %.6142 =l loadl %.5982 + %.6143 =l loadl %.2128 + %.6144 =w cnel %.6142, %.6143 + %.6145 =l copy 18446744073709551609 + %.6146 =l extsw 0 + %.6147 =l extsw 2 + %.6148 =l mul %.6147, 360 + %.6149 =l add %.250, %.6148 + %.6150 =l extsw 1 + %.6151 =l mul %.6150, 120 + %.6152 =l add %.6149, %.6151 + %.6153 =l extsw 1 + %.6154 =l mul %.6153, 20 + %.6155 =l add %.6152, %.6154 + %.6156 =w cnel %.6146, %.6155 + %.6157 =w xor %.6156, 18446744073709551615 + %.6158 =w copy %.6157 + %.6159 =w copy 8 + %.6160 =w call $safe_lshift_func_uint16_t_u_u(w %.6158, w %.6159) + %.6161 =w loadub %.6 + %.6162 =l extsw 4 + %.6163 =l mul %.6162, 1 + %.6164 =l add $g_132, %.6163 + %.6165 =w loadsb %.6164 + %.6166 =w extsb %.6165 + %.6167 =l loadl %.5993 + storeh %.6166, %.6167 + %.6168 =l extsh %.6166 + %.6169 =l and %.6168, 3197 + %.6170 =w ceql %.6169, 248615576 + %.6171 =l loadl %.2128 + %.6172 =l loadl %.6171 + %.6173 =w loadsw %.6172 + %.6174 =w ceqw %.6170, %.6173 + %.6175 =w call $safe_lshift_func_uint16_t_u_s(w %.6160, w %.6174) + %.6176 =l extuh %.6175 + %.6177 =l call $safe_div_func_uint64_t_u_u(l %.6145, l %.6176) + %.6178 =l copy 7 + %.6179 =w cnel %.6177, %.6178 + %.6180 =l extsw %.6179 + %.6181 =l and %.6180, 4 + %.6182 =w copy %.6181 + %.6183 =w loaduw %.4 + %.6184 =w call $safe_rshift_func_uint8_t_u_u(w %.6182, w %.6183) + %.6185 =w extub %.6184 + %.6186 =w xor %.6144, %.6185 + %.6187 =w cnel 9, 0 + jnz %.6187, @if_true.1350, @if_false.1351 
+@if_true.1350 + %.6189 =l add %.6188, 0 + %.6190 =l copy 1 + storel %.6190, %.6189 + %.6192 =l add %.6191, 0 + %.6193 =l copy $g_265 + %.6194 =l mul 0, 1 + %.6195 =l add %.6193, %.6194 + %.6196 =l copy %.6195 + storel %.6196, %.6192 + %.6198 =l add %.6197, 0 + %.6199 =w copy 1 + storew %.6199, %.6198 + %.6200 =l copy $g_265 + %.6201 =l mul 48, 1 + %.6202 =l add %.6200, %.6201 + %.6203 =l copy %.6202 + storew 0, %.6203 +@for_cond.1352 + %.6204 =l copy $g_265 + %.6205 =l mul 48, 1 + %.6206 =l add %.6204, %.6205 + %.6207 =l copy %.6206 + %.6208 =w loadsw %.6207 + %.6209 =w csgtw %.6208, 13 + jnz %.6209, @for_body.1353, @for_join.1355 +@for_body.1353 + %.6211 =l add %.6210, 0 + %.6212 =l extsw 0 + %.6213 =l sub %.6212, 1 + %.6214 =w copy %.6213 + storew %.6214, %.6211 + %.6215 =w loadsw %.6210 + %.6216 =w cnew %.6215, 0 + jnz %.6216, @if_true.1356, @if_false.1357 +@if_true.1356 + jmp @for_join.1355 +@if_false.1357 +@for_cont.1354 + %.6217 =l copy $g_265 + %.6218 =l mul 48, 1 + %.6219 =l add %.6217, %.6218 + %.6220 =l copy %.6219 + %.6221 =w loadsw %.6220 + %.6222 =w add %.6221, 1 + storew %.6222, %.6220 + jmp @for_cond.1352 +@for_join.1355 + %.6223 =w loadsw %.2149 + %.6224 =l loadl %.2128 + %.6225 =l loadl %.6224 + %.6226 =w loadsw %.6225 + %.6227 =w loaduw %.2033 + %.6228 =l loadl $g_88 + %.6229 =l loadl %.6228 + %.6230 =l loadl %.6229 + %.6231 =w loadsw %.6230 + %.6232 =w copy %.6231 + %.6233 =w or %.6227, %.6232 + %.6234 =w loaduw %.4 + %.6235 =l loadl %.6188 + %.6236 =w copy %.6235 + %.6237 =w call $safe_lshift_func_uint8_t_u_s(w %.6236, w 2) + %.6238 =w loadub %.6 + %.6239 =l loadl %.6191 + storeb %.6238, %.6239 + %.6240 =w copy 1 + %.6241 =w call $safe_rshift_func_uint8_t_u_u(w %.6238, w %.6240) + %.6242 =w extub %.6241 + %.6243 =l loadl $g_173 + storew %.6242, %.6243 + %.6244 =l extsw 6 + %.6245 =l mul %.6244, 8 + %.6246 =l add $g_364, %.6245 + %.6247 =l extsw 0 + %.6248 =w ceql %.6246, %.6247 + %.6249 =w copy %.6248 + %.6250 =l extsw 0 + %.6251 =l mul %.6250, 
4 + %.6252 =l add %.2152, %.6251 + %.6253 =w loaduw %.6252 + %.6254 =w or %.6249, %.6253 + %.6255 =l extuw %.6254 + %.6256 =l loadl $g_399 + %.6257 =l copy %.6256 + %.6258 =l call $safe_mod_func_int64_t_s_s(l %.6255, l %.6257) + %.6259 =l loadl %.2128 + %.6260 =l loadl %.6259 + %.6261 =w loadsw %.6260 + %.6262 =l extsw 5 + %.6263 =l mul %.6262, 8 + %.6264 =l add %.5631, %.6263 + %.6265 =l loadl %.6264 + %.6266 =w ceql %.2, %.6265 + %.6267 =w copy %.6266 + %.6268 =w copy 1 + %.6269 =w call $safe_lshift_func_int8_t_s_u(w %.6267, w %.6268) + %.6270 =w extsb %.6269 + %.6271 =w loaduw %.4 + %.6272 =w cnew %.6270, %.6271 + %.6273 =w copy %.6272 + %.6274 =w copy 5 + %.6275 =w call $safe_mod_func_uint8_t_u_u(w %.6273, w %.6274) + %.6276 =w extub %.6275 + %.6277 =w call $safe_add_func_int32_t_s_s(w %.6242, w %.6276) + %.6278 =w copy %.6277 + %.6279 =w call $safe_div_func_uint8_t_u_u(w %.6237, w %.6278) + %.6280 =w extub %.6279 + %.6281 =w culew %.6233, %.6280 + storew %.6281, %.6197 + %.6282 =w copy %.6281 + %.6283 =l loadl $g_422 + %.6284 =w loaduw %.6283 + %.6285 =w xor %.6282, %.6284 + %.6286 =l copy $g_265 + %.6287 =l mul 36, 1 + %.6288 =l add %.6286, %.6287 + %.6289 =l copy %.6288 + %.6290 =w loaduw %.6289 + %.6291 =w culew %.6285, %.6290 + %.6292 =w or %.6226, %.6291 + %.6293 =w loadub %.6 + %.6294 =w extub %.6293 + %.6295 =w cnew %.6294, 0 + jnz %.6295, @logic_join.1359, @logic_right.1358 +@logic_right.1358 + %.6296 =w cnel 0, 0 +@logic_join.1359 + %.6297 =w phi @for_join.1355 %.6295, @logic_right.1358 %.6296 + %.6298 =w and %.6223, %.6297 + storew %.6298, %.2149 + jmp @if_join.1360 +@if_false.1351 + %.6300 =l add %.6299, 0 + storel $g_173, %.6300 + %.6302 =l add %.6301, 0 + storel %.248, %.6302 + %.6304 =l add %.6303, 0 + %.6305 =l extsw 0 + %.6306 =l copy %.6305 + storel %.6306, %.6304 + %.6308 =l add %.6307, 0 + storel %.5993, %.6308 + %.6310 =l add %.6309, 0 + %.6311 =l extsw 0 + %.6312 =l mul %.6311, 40 + %.6313 =l add %.5749, %.6312 + %.6314 =l extsw 0 + %.6315 
=l mul %.6314, 8 + %.6316 =l add %.6313, %.6315 + storel %.6316, %.6310 + %.6318 =l add %.6317, 0 + %.6319 =w copy 3360582374 + storew %.6319, %.6318 + %.6320 =l add %.6317, 4 + %.6321 =w copy 3360582374 + storew %.6321, %.6320 + %.6322 =l add %.6317, 8 + %.6323 =w copy 3745884853 + storew %.6323, %.6322 + %.6324 =l add %.6317, 12 + %.6325 =l extsw 0 + %.6326 =l sub %.6325, 8 + %.6327 =w copy %.6326 + storew %.6327, %.6324 + %.6328 =l add %.6317, 16 + %.6329 =w copy 1 + storew %.6329, %.6328 + %.6330 =l add %.6317, 20 + %.6331 =w copy 3745884853 + storew %.6331, %.6330 + %.6332 =l add %.6317, 24 + %.6333 =w copy 1 + storew %.6333, %.6332 + %.6334 =l add %.6317, 28 + %.6335 =l extsw 0 + %.6336 =l sub %.6335, 8 + %.6337 =w copy %.6336 + storew %.6337, %.6334 + %.6338 =l add %.6317, 32 + %.6339 =w copy 3745884853 + storew %.6339, %.6338 + %.6340 =l add %.6317, 36 + %.6341 =w copy 3360582374 + storew %.6341, %.6340 + %.6342 =l add %.6317, 40 + %.6343 =w copy 3360582374 + storew %.6343, %.6342 + %.6344 =l add %.6317, 44 + %.6345 =w copy 3745884853 + storew %.6345, %.6344 + %.6346 =l add %.6317, 48 + %.6347 =l extsw 0 + %.6348 =l sub %.6347, 8 + %.6349 =w copy %.6348 + storew %.6349, %.6346 + %.6350 =l add %.6317, 52 + %.6351 =w copy 1 + storew %.6351, %.6350 + %.6352 =l add %.6317, 56 + %.6353 =w copy 3745884853 + storew %.6353, %.6352 + %.6354 =l add %.6317, 60 + %.6355 =w copy 1 + storew %.6355, %.6354 + %.6356 =l add %.6317, 64 + %.6357 =l extsw 0 + %.6358 =l sub %.6357, 8 + %.6359 =w copy %.6358 + storew %.6359, %.6356 + %.6360 =l add %.6317, 68 + %.6361 =w copy 3745884853 + storew %.6361, %.6360 + %.6362 =l add %.6317, 72 + %.6363 =w copy 3360582374 + storew %.6363, %.6362 + %.6364 =l add %.6317, 76 + %.6365 =w copy 3360582374 + storew %.6365, %.6364 + %.6366 =l add %.6317, 80 + %.6367 =w copy 3745884853 + storew %.6367, %.6366 + %.6370 =l extsw 0 + %.6371 =l loadl %.6299 + %.6372 =w cnel %.6370, %.6371 + %.6373 =w copy %.6372 + %.6374 =l copy $g_265 + %.6375 =l 
mul 0, 1 + %.6376 =l add %.6374, %.6375 + %.6377 =l copy %.6376 + %.6378 =w loadub %.6377 + %.6379 =l extsw 0 + %.6380 =l extsw 1 + %.6381 =l mul %.6380, 240 + %.6382 =l add %.2153, %.6381 + %.6383 =l extsw 3 + %.6384 =l mul %.6383, 40 + %.6385 =l add %.6382, %.6384 + %.6386 =l extsw 0 + %.6387 =l mul %.6386, 8 + %.6388 =l add %.6385, %.6387 + %.6389 =l loadl %.6388 + %.6390 =w ceql %.6379, %.6389 + %.6391 =w copy %.6390 + %.6392 =w call $safe_mul_func_int16_t_s_s(w %.6373, w %.6391) + %.6393 =w extsh %.6392 + %.6394 =w loaduw %.5732 + %.6395 =w cnew %.6393, %.6394 + %.6396 =w cnew %.6395, 0 + jnz %.6396, @logic_right.1361, @logic_join.1362 +@logic_right.1361 + %.6397 =l loadl %.5993 + %.6398 =w loadsh %.6397 + %.6399 =w extsh %.6398 + %.6400 =l extsw 0 + %.6401 =l sub %.6400, 1 + %.6402 =w cnel %.6401, 0 + jnz %.6402, @logic_right.1365, @logic_join.1366 +@logic_right.1365 + %.6403 =w loadub %.2829 + %.6404 =w extub %.6403 + %.6405 =w cnew %.6404, 0 +@logic_join.1366 + %.6406 =w phi @logic_right.1361 %.6402, @logic_right.1365 %.6405 + %.6407 =w copy 0 + %.6408 =w copy 6 + %.6409 =w call $safe_lshift_func_uint16_t_u_u(w %.6407, w %.6408) + %.6410 =w extuh %.6409 + %.6411 =w or %.6406, %.6410 + %.6412 =l extsw %.6411 + %.6413 =l xor %.6412, 0 + %.6414 =w loadub %.6 + %.6415 =l extub %.6414 + %.6416 =w cultl %.6413, %.6415 + %.6417 =w cnew %.6416, 0 + jnz %.6417, @logic_right.1363, @logic_join.1364 +@logic_right.1363 + %.6418 =l extsw 0 + %.6419 =l sub %.6418, 1 + %.6420 =w cnel %.6419, 0 +@logic_join.1364 + %.6421 =w phi @logic_join.1366 %.6417, @logic_right.1363 %.6420 + %.6422 =w and %.6399, %.6421 + %.6423 =w copy %.6422 + storeh %.6423, %.6397 + %.6424 =w extsh %.6423 + %.6425 =w cnew %.6424, 0 +@logic_join.1362 + %.6426 =w phi @if_false.1351 %.6396, @logic_join.1364 %.6425 + %.6427 =l extsw %.6426 + %.6428 =l call $safe_unary_minus_func_uint64_t_u(l %.6427) + %.6429 =w loaduw %.4 + %.6430 =l extuw %.6429 + %.6431 =l or %.6428, %.6430 + %.6432 =w cnel %.6431, 0 + 
jnz %.6432, @if_true.1367, @if_false.1368 +@if_true.1367 + %.6434 =l add %.6433, 0 + %.6435 =l copy 12462308736532551437 + storel %.6435, %.6434 + %.6437 =l add %.6436, 0 + %.6438 =w copy 1687502936 + storew %.6438, %.6437 + %.6440 =l add %.6439, 0 + %.6441 =w copy 3369665070 + storew %.6441, %.6440 + %.6442 =l add %.6439, 4 + %.6443 =w copy 3369665070 + storew %.6443, %.6442 + %.6444 =l add %.6439, 8 + %.6445 =w copy 3369665070 + storew %.6445, %.6444 + %.6446 =l add %.6439, 12 + %.6447 =w copy 3369665070 + storew %.6447, %.6446 + %.6449 =l loadl %.6299 + %.6450 =l loadl %.6449 + %.6451 =w loadsw %.6450 + %.6452 =l extsw %.6451 + %.6453 =l extsw 0 + %.6454 =l sub %.6453, 6 + %.6455 =l or %.6452, %.6454 + %.6456 =w copy %.6455 + storew %.6456, %.6450 + %.6457 =l extsw %.6456 + storel %.6457, %.6433 + %.6458 =l loadl $g_477 + %.6459 =l sub %.6458, 1 + storel %.6459, $g_477 + jmp @if_join.1369 +@if_false.1368 + %.6460 =w copy 72875385 + %.6461 =l extsw 0 + %.6462 =l mul %.6461, 4 + %.6463 =l add %.5995, %.6462 + storew %.6460, %.6463 + %.6464 =l loadl $g_38 + %.6465 =l loadl %.6464 + ret %.6465 +@if_join.1369 + %.6466 =w copy 305323823 + %.6467 =l loadl $g_173 + %.6468 =w loadsw %.6467 + %.6469 =w call $safe_add_func_int32_t_s_s(w %.6466, w %.6468) + %.6470 =l extsw %.6469 + %.6471 =w cnel 1309538961660777797, %.6470 + %.6472 =w loadub %.5996 + %.6473 =w extub %.6472 + %.6474 =l loadl $g_363 + storel %.2829, %.6474 + %.6475 =l loadl %.6301 + storel %.6, %.6475 + %.6476 =w cnel %.2829, %.6 + %.6477 =w csgtw %.6473, %.6476 + %.6478 =w copy %.6477 + %.6479 =l loadl %.6307 + storel $g_81, %.6479 + %.6480 =l extsw 0 + %.6481 =l mul %.6480, 8 + %.6482 =l add %.5621, %.6481 + %.6483 =l loadl %.6482 + %.6484 =l copy %.6483 + %.6485 =l extsw 8 + %.6486 =l mul %.6485, 8 + %.6487 =l add %.5999, %.6486 + storel %.6484, %.6487 + %.6488 =w ceql $g_81, %.6484 + %.6489 =l loadl $g_38 + %.6490 =l loadl %.6489 + %.6491 =w loadsw %.6490 + %.6492 =l extsw %.6491 + %.6493 =w cnel %.6492, 
2495061802 + %.6494 =w call $safe_add_func_int32_t_s_s(w %.6488, w %.6493) + %.6495 =w cnew %.6494, 0 + jnz %.6495, @logic_join.1373, @logic_right.1372 +@logic_right.1372 + %.6496 =w loadub %.6 + %.6497 =w extub %.6496 + %.6498 =w cnew %.6497, 0 +@logic_join.1373 + %.6499 =w phi @if_join.1369 %.6495, @logic_right.1372 %.6498 + %.6500 =w copy %.6499 + %.6501 =w call $safe_mod_func_uint16_t_u_u(w %.6478, w %.6500) + %.6502 =w extuh %.6501 + %.6503 =w csltw %.6471, %.6502 + %.6504 =l copy $g_130 + %.6505 =l mul 8, 1 + %.6506 =l add %.6504, %.6505 + %.6507 =l copy %.6506 + %.6508 =w loadsh %.6507 + %.6509 =w extsh %.6508 + %.6510 =w cnew %.6509, 0 + jnz %.6510, @logic_right.1370, @logic_join.1371 +@logic_right.1370 + %.6511 =w loadub %.6 + %.6512 =w extub %.6511 + %.6513 =w cnew %.6512, 0 +@logic_join.1371 + %.6514 =w phi @logic_join.1373 %.6510, @logic_right.1370 %.6513 + %.6515 =l loadl $g_422 + %.6516 =l extsw 0 + %.6517 =w ceql %.6515, %.6516 + %.6518 =w cnew %.6517, 0 + jnz %.6518, @if_true.1374, @if_false.1375 +@if_true.1374 + %.6520 =l add %.6519, 0 + %.6521 =l copy $g_265 + %.6522 =l mul 8, 1 + %.6523 =l add %.6521, %.6522 + %.6524 =l copy %.6523 + storel %.6524, %.6520 + %.6525 =l loadl %.6519 + %.6526 =w cnel $g_80, %.6525 + %.6527 =l loadl %.6299 + %.6528 =l loadl %.6527 + storew %.6526, %.6528 + %.6529 =l loadl %.2 + ret %.6529 +@if_false.1375 + %.6530 =l loadl %.2 + %.6531 =l loadl %.5982 + storel %.6530, %.6531 + storel %.6530, %.2036 + %.6532 =l loadl %.2128 + %.6533 =l loadl %.6532 + %.6534 =w loadsw %.6533 + %.6535 =w cnew %.6534, 0 + jnz %.6535, @if_true.1377, @if_false.1378 +@if_true.1377 + jmp @for_join.1337 +@if_false.1378 +@if_join.1376 + %.6536 =l extsw 0 + %.6537 =l mul %.6536, 40 + %.6538 =l add %.5749, %.6537 + %.6539 =l extsw 0 + %.6540 =l mul %.6539, 8 + %.6541 =l add %.6538, %.6540 + %.6542 =l loadl %.6541 + %.6543 =l loadl %.6309 + storel %.6542, %.6543 + %.6544 =l loadl $g_23 + %.6545 =w cnel %.6542, %.6544 + %.6546 =w copy %.6545 + 
%.6547 =w call $safe_lshift_func_uint16_t_u_s(w %.6546, w 3) + %.6548 =w copy %.6547 + %.6549 =w call $safe_rshift_func_int16_t_s_s(w %.6548, w 7) + %.6550 =w extsh %.6549 + %.6551 =w cnew %.6550, 0 + jnz %.6551, @if_true.1379, @if_false.1380 +@if_true.1379 + %.6553 =l add %.6552, 0 + %.6554 =w copy 911566708 + storew %.6554, %.6553 + %.6555 =w loaduw %.6552 + %.6556 =w sub %.6555, 1 + storew %.6556, %.6552 + %.6557 =w loadsw %.2125 + %.6558 =l copy $g_265 + %.6559 =l mul 48, 1 + %.6560 =l add %.6558, %.6559 + %.6561 =l copy %.6560 + %.6562 =w loadsw %.6561 + %.6563 =w copy %.6562 + %.6564 =l loadl %.5629 + %.6565 =l loadl %.6564 + %.6566 =w loaduw %.4 + %.6567 =w cnew %.6566, 0 + jnz %.6567, @logic_join.1382, @logic_right.1381 +@logic_right.1381 + %.6568 =l loadl %.2036 + %.6569 =w loadsw %.6568 + %.6570 =w loaduw %.4 + %.6571 =w loaduw %.4 + %.6572 =w loadub %.6 + %.6573 =w extub %.6572 + %.6574 =w cultw %.6571, %.6573 + %.6575 =w cnew %.6574, 0 + jnz %.6575, @logic_join.1384, @logic_right.1383 +@logic_right.1383 + %.6576 =w loadub %.2832 + %.6577 =l extub %.6576 + %.6578 =w cslel 4740881255833919779, %.6577 + %.6579 =l extsw %.6578 + %.6580 =l extsw 3 + %.6581 =l mul %.6580, 12 + %.6582 =l add %.6317, %.6581 + %.6583 =l extsw 2 + %.6584 =l mul %.6583, 4 + %.6585 =l add %.6582, %.6584 + %.6586 =w loadsw %.6585 + %.6587 =l extsw %.6586 + %.6588 =w cultl %.6587, 65531 + %.6589 =l copy $g_185 + %.6590 =l mul 40, 1 + %.6591 =l add %.6589, %.6590 + %.6592 =l copy %.6591 + %.6593 =w loadsw %.6592 + %.6594 =w csltw %.6588, %.6593 + %.6595 =l extsw 0 + %.6596 =l sub %.6595, 4 + %.6597 =w copy %.6596 + %.6598 =w call $safe_sub_func_int32_t_s_s(w %.6594, w %.6597) + %.6599 =w copy %.6598 + %.6600 =w copy 6 + %.6601 =w call $safe_lshift_func_uint16_t_u_u(w %.6599, w %.6600) + %.6602 =w copy %.6601 + %.6603 =w loadub %.6 + %.6604 =w extub %.6603 + %.6605 =w call $safe_rshift_func_int8_t_s_s(w %.6602, w %.6604) + %.6606 =l extsb %.6605 + %.6607 =l loadl %.2128 + %.6608 =l 
loadl %.6607 + %.6609 =w loadsw %.6608 + %.6610 =l extsw %.6609 + %.6611 =l call $safe_mod_func_uint64_t_u_u(l %.6606, l %.6610) + %.6612 =l extsw 0 + %.6613 =l mul %.6612, 40 + %.6614 =l add %.5749, %.6613 + %.6615 =l extsw 0 + %.6616 =l mul %.6615, 8 + %.6617 =l add %.6614, %.6616 + %.6618 =l loadl %.6617 + %.6619 =l extsw 0 + %.6620 =w cnel %.6618, %.6619 + %.6621 =l extsw %.6620 + %.6622 =l or %.6621, 4294967293 + %.6623 =l or %.6622, 1 + %.6624 =w copy %.6623 + %.6625 =l loadl %.2128 + %.6626 =l loadl %.6625 + %.6627 =w loadsw %.6626 + %.6628 =w copy %.6627 + %.6629 =w call $safe_add_func_int16_t_s_s(w %.6624, w %.6628) + %.6630 =w extsh %.6629 + %.6631 =w cnew %.6630, 0 + jnz %.6631, @logic_join.1388, @logic_right.1387 +@logic_right.1387 + %.6632 =l copy $g_130 + %.6633 =l mul 8, 1 + %.6634 =l add %.6632, %.6633 + %.6635 =l copy %.6634 + %.6636 =w loadsh %.6635 + %.6637 =w extsh %.6636 + %.6638 =w cnew %.6637, 0 +@logic_join.1388 + %.6639 =w phi @logic_right.1383 %.6631, @logic_right.1387 %.6638 + %.6640 =l extsw %.6639 + %.6641 =l call $safe_unary_minus_func_int64_t_s(l %.6640) + %.6642 =l loadl %.2036 + %.6643 =w loadsw %.6642 + %.6644 =l extsw %.6643 + %.6645 =w cnel %.6641, %.6644 + %.6646 =w cnew %.6645, 0 + jnz %.6646, @logic_right.1385, @logic_join.1386 +@logic_right.1385 + %.6647 =w cnel 39637, 0 +@logic_join.1386 + %.6648 =w phi @logic_join.1388 %.6646, @logic_right.1385 %.6647 + %.6649 =w csltl %.6579, 1651712922 + %.6650 =w cnew %.6649, 0 +@logic_join.1384 + %.6651 =w phi @logic_right.1381 %.6575, @logic_join.1386 %.6650 + %.6652 =w or %.6569, %.6651 + %.6653 =l copy $g_130 + %.6654 =l mul 8, 1 + %.6655 =l add %.6653, %.6654 + %.6656 =l copy %.6655 + %.6657 =w loadsh %.6656 + %.6658 =l copy $g_185 + %.6659 =l mul 16, 1 + %.6660 =l add %.6658, %.6659 + %.6661 =l copy %.6660 + %.6662 =w loadsw %.6661 + %.6663 =w cnew %.6662, 0 +@logic_join.1382 + %.6664 =w phi @if_true.1379 %.6567, @logic_join.1384 %.6663 + %.6665 =w loaduw %.4 + %.6666 =l extuw 
%.6665 + %.6667 =l or %.6565, %.6666 + storel %.6667, %.6564 + %.6668 =w loadsb %.2042 + %.6669 =l extsb %.6668 + %.6670 =w cugtl %.6667, %.6669 + %.6671 =w copy %.6670 + %.6672 =w call $safe_mul_func_int16_t_s_s(w %.6563, w %.6671) + %.6673 =w extsh %.6672 + %.6674 =l loadl $g_173 + storew %.6673, %.6674 + %.6675 =w or %.6557, %.6673 + storew %.6675, %.2125 + %.6676 =l loadl %.2 + %.6677 =w loadsw %.6676 + %.6678 =w cnew %.6677, 0 + jnz %.6678, @if_true.1389, @if_false.1390 +@if_true.1389 + jmp @for_join.1337 +@if_false.1390 + jmp @if_join.1391 +@if_false.1380 + %.6680 =l add %.6679, 0 + %.6681 =w copy 140 + storeb %.6681, %.6680 + storew 0, %.6683 +@for_cond.1392 + %.6684 =w loadsw %.6683 + %.6685 =w csltw %.6684, 1 + jnz %.6685, @for_body.1393, @for_join.1395 +@for_body.1393 + %.6686 =l copy $g_185 + %.6687 =l mul 8, 1 + %.6688 =l add %.6686, %.6687 + %.6689 =l copy %.6688 + %.6690 =w loadsw %.6683 + %.6691 =l extsw %.6690 + %.6692 =l mul %.6691, 8 + %.6693 =l add %.6682, %.6692 + storel %.6689, %.6693 +@for_cont.1394 + %.6694 =w loadsw %.6683 + %.6695 =w add %.6694, 1 + storew %.6695, %.6683 + jmp @for_cond.1392 +@for_join.1395 + %.6696 =l loadl %.6299 + %.6697 =l loadl %.6696 + %.6698 =w loadsw %.6697 + %.6699 =l extsw %.6698 + %.6700 =l extsw 0 + %.6701 =l sub %.6700, 5 + %.6702 =l and %.6699, %.6701 + %.6703 =w copy %.6702 + storew %.6703, %.6697 + %.6704 =l loadl %.6299 + %.6705 =l loadl %.6704 + %.6706 =l loadl %.6299 + storel %.6705, %.6706 + %.6707 =w loadsw %.2149 + %.6708 =w loadsb %.6679 + %.6709 =w copy %.6708 + %.6710 =l copy 11888349605583498864 + storel %.6710, $g_82 + %.6711 =w cnel %.6710, 0 + jnz %.6711, @logic_right.1396, @logic_join.1397 +@logic_right.1396 + %.6712 =w loaduw %.4 + %.6713 =w xor %.6712, 18446744073709551615 + %.6714 =w copy %.6713 + %.6715 =l loadl %.6299 + %.6716 =l loadl %.6715 + storew %.6714, %.6716 + %.6717 =w cnew %.6714, 0 +@logic_join.1397 + %.6718 =w phi @for_join.1395 %.6711, @logic_right.1396 %.6717 + %.6719 =w copy 
%.6718 + %.6720 =w call $safe_add_func_uint8_t_u_u(w %.6709, w %.6719) + %.6721 =w extub %.6720 + %.6722 =w and %.6707, %.6721 + storew %.6722, %.2149 + %.6723 =l loadl %.6299 + %.6724 =l loadl %.6723 + %.6725 =w loadsw %.6724 + %.6726 =l loadl $g_82 + %.6727 =l copy 6184310116488843811 + %.6728 =l copy 1 + %.6729 =w cugtl %.6727, %.6728 + %.6730 =l extsw %.6729 + %.6731 =w cslel %.6726, %.6730 + %.6732 =w loaduw %.4 + %.6733 =l extuw %.6732 + %.6734 =w csgel 408415716, %.6733 + %.6735 =l copy 7 + %.6736 =w cnel %.6735, 1 + %.6737 =w xor %.6731, %.6736 + %.6738 =w and %.6725, %.6737 + storew %.6738, %.6724 +@if_join.1391 + %.6739 =l loadl %.6126 + ret %.6739 +@if_join.1360 + %.6740 =l loadl %.2 + %.6741 =w loadsw %.6740 + %.6742 =l loadl $g_173 + storew %.6741, %.6742 + %.6743 =l loadl %.6126 + storew %.6741, %.6743 + jmp @if_join.1398 +@if_false.1345 + %.6745 =l add %.6744, 0 + %.6746 =l copy $g_130 + %.6747 =l mul 8, 1 + %.6748 =l add %.6746, %.6747 + %.6749 =l copy %.6748 + storel %.6749, %.6745 + %.6752 =l add %.6751, 0 + %.6753 =w copy 3440299814 + storew %.6753, %.6752 + %.6755 =l add %.6754, 0 + %.6756 =l copy $g_185 + %.6757 =l mul 32, 1 + %.6758 =l add %.6756, %.6757 + %.6759 =l copy %.6758 + storel %.6759, %.6755 + storew 0, %.6760 +@for_cond.1399 + %.6761 =w loadsw %.6760 + %.6762 =w csltw %.6761, 2 + jnz %.6762, @for_body.1400, @for_join.1402 +@for_body.1400 + %.6763 =w copy 1393370637 + %.6764 =w loadsw %.6760 + %.6765 =l extsw %.6764 + %.6766 =l mul %.6765, 4 + %.6767 =l add %.6750, %.6766 + storew %.6763, %.6767 +@for_cont.1401 + %.6768 =w loadsw %.6760 + %.6769 =w add %.6768, 1 + storew %.6769, %.6760 + jmp @for_cond.1399 +@for_join.1402 + %.6770 =l loadl $g_88 + %.6771 =l loadl %.6770 + %.6772 =l loadl %.6771 + %.6773 =w loadsw %.6772 + %.6774 =w cnew %.6773, 0 + jnz %.6774, @if_true.1403, @if_false.1404 +@if_true.1403 + jmp @for_join.1337 +@if_false.1404 + %.6775 =w copy 0 + storew %.6775, %.4 +@for_cond.1405 + %.6776 =w loaduw %.4 + %.6777 =w 
copy 24 + %.6778 =w cnew %.6776, %.6777 + jnz %.6778, @for_body.1406, @for_join.1408 +@for_body.1406 + %.6781 =l add %.6780, 0 + %.6782 =w copy 72 + storeb %.6782, %.6781 + %.6783 =l add %.6780, 1 + storeb 0, %.6783 + %.6784 =l add %.6780, 2 + storeh 0, %.6784 + %.6785 =l add %.6780, 4 + storew 0, %.6785 + %.6786 =l add %.6780, 8 + storel 321589332028328224, %.6786 + %.6787 =l add %.6780, 16 + %.6788 =w copy 9 + storew %.6788, %.6787 + %.6789 =l add %.6780, 20 + storew 0, %.6789 + %.6790 =l add %.6780, 24 + %.6791 =l copy 1143993877391193064 + storel %.6791, %.6790 + %.6792 =l add %.6780, 32 + %.6793 =w copy 4294967295 + storew %.6793, %.6792 + %.6794 =l add %.6780, 36 + %.6795 =w copy 6 + storew %.6795, %.6794 + %.6796 =l add %.6780, 40 + %.6797 =w copy 2065283816 + storew %.6797, %.6796 + %.6798 =l add %.6780, 44 + %.6799 =l extsw 0 + %.6800 =l sub %.6799, 1 + %.6801 =w copy %.6800 + storew %.6801, %.6798 + %.6802 =l add %.6780, 48 + %.6803 =w copy 3321033948 + storew %.6803, %.6802 + %.6804 =l add %.6780, 52 + storew 0, %.6804 + storew 0, %.6806 +@for_cond.1409 + %.6808 =w loadsw %.6806 + %.6809 =w csltw %.6808, 7 + jnz %.6809, @for_body.1410, @for_join.1412 +@for_body.1410 + storew 0, %.6807 +@for_cond.1413 + %.6810 =w loadsw %.6807 + %.6811 =w csltw %.6810, 2 + jnz %.6811, @for_body.1414, @for_join.1416 +@for_body.1414 + %.6812 =w copy 1 + %.6813 =w loadsw %.6806 + %.6814 =l extsw %.6813 + %.6815 =l mul %.6814, 2 + %.6816 =l add %.6779, %.6815 + %.6817 =w loadsw %.6807 + %.6818 =l extsw %.6817 + %.6819 =l mul %.6818, 1 + %.6820 =l add %.6816, %.6819 + storeb %.6812, %.6820 +@for_cont.1415 + %.6821 =w loadsw %.6807 + %.6822 =w add %.6821, 1 + storew %.6822, %.6807 + jmp @for_cond.1413 +@for_join.1416 +@for_cont.1411 + %.6823 =w loadsw %.6806 + %.6824 =w add %.6823, 1 + storew %.6824, %.6806 + jmp @for_cond.1409 +@for_join.1412 + storew 0, %.6806 +@for_cond.1417 + %.6825 =w loadsw %.6806 + %.6826 =w csltw %.6825, 1 + jnz %.6826, @for_body.1418, @for_join.1420 
+@for_body.1418 + %.6827 =w copy 47661 + %.6828 =w loadsw %.6806 + %.6829 =l extsw %.6828 + %.6830 =l mul %.6829, 2 + %.6831 =l add %.6805, %.6830 + storeh %.6827, %.6831 +@for_cont.1419 + %.6832 =w loadsw %.6806 + %.6833 =w add %.6832, 1 + storew %.6833, %.6806 + jmp @for_cond.1417 +@for_join.1420 + %.6834 =w loadub %.6 + %.6835 =w loaduw %.4 + %.6836 =w copy %.6835 + %.6837 =w call $safe_lshift_func_uint8_t_u_s(w %.6836, w 1) + %.6838 =w loaduw %.4 + %.6839 =w copy %.6838 + %.6840 =w call $safe_sub_func_uint8_t_u_u(w %.6837, w %.6839) + %.6841 =l extub %.6840 + %.6842 =l and 248, %.6841 + %.6843 =w cnel %.6842, 0 + jnz %.6843, @if_true.1421, @if_false.1422 +@if_true.1421 + %.6844 =l loadl %.2 + storel %.6844, %.2 + %.6845 =w cslel 20, 0 + %.6846 =l loadl %.2036 + storew %.6845, %.6846 + jmp @if_join.1423 +@if_false.1422 + %.6848 =l add %.6847, 0 + %.6849 =w copy 65534 + storeh %.6849, %.6848 + %.6851 =l add %.6850, 0 + %.6852 =w copy 1179286828 + storew %.6852, %.6851 + %.6854 =l add %.6853, 0 + %.6855 =l extsw 0 + %.6856 =l sub %.6855, 4 + %.6857 =w copy %.6856 + storew %.6857, %.6854 + %.6858 =l extsw 5 + %.6859 =l mul %.6858, 2 + %.6860 =l add %.6779, %.6859 + %.6861 =l extsw 1 + %.6862 =l mul %.6861, 1 + %.6863 =l add %.6860, %.6862 + %.6864 =w loadub %.6863 + %.6865 =w sub %.6864, 1 + storeb %.6865, %.6863 + %.6866 =w loadsw %.6751 + %.6867 =w loadsw %.6853 + %.6868 =w loaduh %.6847 + %.6869 =w sub %.6868, 1 + storeh %.6869, %.6847 + %.6870 =l loadl %.6744 + %.6871 =w loadsh %.6870 + %.6872 =w extsh %.6871 + %.6873 =l extsw 4 + %.6874 =l mul %.6873, 8 + %.6875 =l add %.5621, %.6874 + %.6876 =l loadl %.6875 + %.6877 =l loadl %.6744 + %.6878 =w ceql %.6876, %.6877 + %.6879 =l extsw %.6878 + %.6880 =l loadl %.5629 + %.6881 =l loadl %.6880 + %.6882 =l xor %.6881, 17145105804842445641 + storel %.6882, %.6880 + %.6883 =w cugtl %.6879, %.6882 + %.6884 =l loadl %.2036 + %.6885 =w loadsw %.6884 + %.6886 =l copy $g_518 + %.6887 =l mul 40, 1 + %.6888 =l add %.6886, 
%.6887 + %.6889 =l copy %.6888 + %.6890 =w loadsw %.6889 + %.6891 =w csgew %.6883, %.6890 + %.6892 =l extsw 1 + %.6893 =l mul %.6892, 4 + %.6894 =l add %.6750, %.6893 + %.6895 =w loadsw %.6894 + %.6896 =l loadl $g_422 + %.6897 =w loaduw %.6896 + %.6898 =w loadsw %.6850 + %.6899 =w copy %.6898 + %.6900 =l loadl %.2128 + %.6901 =l loadl %.6900 + %.6902 =w loadsw %.6901 + %.6903 =w copy %.6902 + %.6904 =w call $safe_rshift_func_uint16_t_u_u(w %.6899, w %.6903) + %.6905 =l extsw 0 + %.6906 =l mul %.6905, 4 + %.6907 =l add %.6750, %.6906 + %.6908 =w loadsw %.6907 + %.6909 =l copy $g_185 + %.6910 =l mul 48, 1 + %.6911 =l add %.6909, %.6910 + %.6912 =l copy %.6911 + %.6913 =w loadsw %.6912 + %.6914 =w cnew %.6908, %.6913 + %.6915 =l copy 1 + storel %.6915, $g_82 + %.6916 =l copy $g_265 + %.6917 =l mul 32, 1 + %.6918 =l add %.6916, %.6917 + %.6919 =l copy %.6918 + %.6920 =w loaduw %.6919 + %.6921 =l extuw %.6920 + %.6922 =l xor %.6915, %.6921 + %.6923 =w copy %.6922 + %.6924 =l loadl %.2128 + %.6925 =l loadl %.6924 + %.6926 =w loadsw %.6925 + %.6927 =w call $safe_lshift_func_int16_t_s_s(w %.6923, w %.6926) + %.6928 =l extsh %.6927 + %.6929 =l xor %.6928, 255 + %.6930 =l copy 1 + %.6931 =w ceql %.6929, %.6930 + %.6932 =w copy %.6931 + %.6933 =w call $safe_mul_func_uint32_t_u_u(w %.6897, w %.6932) + %.6934 =w loaduw %.4 + %.6935 =l extsw 0 + %.6936 =l loadl %.5890 + %.6937 =w ceql %.6935, %.6936 + %.6938 =w copy %.6937 + %.6939 =l loadl $g_422 + %.6940 =w loaduw %.6939 + %.6941 =w call $safe_add_func_uint32_t_u_u(w %.6938, w %.6940) + %.6942 =w cnew %.6941, 0 + jnz %.6942, @logic_right.1426, @logic_join.1427 +@logic_right.1426 + %.6943 =l loadl %.2036 + %.6944 =w loadsw %.6943 + %.6945 =w cnew %.6944, 0 +@logic_join.1427 + %.6946 =w phi @if_false.1422 %.6942, @logic_right.1426 %.6945 + %.6947 =l extsw 9 + %.6948 =l mul %.6947, 8 + %.6949 =l add %.5631, %.6948 + %.6950 =l extsw 0 + %.6951 =w cnel %.6949, %.6950 + %.6952 =w ceqw %.6951, 0 + %.6953 =w cnew %.6952, 0 + jnz 
%.6953, @logic_join.1425, @logic_right.1424 +@logic_right.1424 + %.6954 =w cnel 1980754864, 0 +@logic_join.1425 + %.6955 =w phi @logic_join.1427 %.6953, @logic_right.1424 %.6954 + %.6956 =w csltw %.6895, %.6955 + %.6957 =w copy %.6956 + %.6958 =w copy 1113302927 + %.6959 =w call $safe_div_func_uint32_t_u_u(w %.6957, w %.6958) + %.6960 =w loadub $g_566 + %.6961 =w extub %.6960 + %.6962 =w and %.6959, %.6961 + %.6963 =w copy %.6962 + %.6964 =w call $safe_rshift_func_int16_t_s_s(w %.6963, w 1) + %.6965 =l extsw 0 + %.6966 =l mul %.6965, 2 + %.6967 =l add %.6805, %.6966 + %.6968 =w loadsh %.6967 + %.6969 =l extsw 1 + %.6970 =l mul %.6969, 4 + %.6971 =l add %.6750, %.6970 + %.6972 =w loadsw %.6971 + %.6973 =w copy %.6972 + %.6974 =w call $safe_lshift_func_int16_t_s_u(w %.6968, w %.6973) + %.6975 =w extsh %.6974 + %.6976 =l loadl $g_173 + storew %.6975, %.6976 + %.6977 =l extsw %.6975 + %.6978 =w cugel %.6977, 0 + %.6979 =w csgew %.6891, %.6978 + %.6980 =w xor %.6872, %.6979 + %.6981 =w copy %.6980 + storeh %.6981, %.6870 + %.6982 =w copy %.6981 + %.6983 =w call $safe_div_func_uint16_t_u_u(w %.6869, w %.6982) + %.6984 =w extuh %.6983 + %.6985 =w and %.6867, %.6984 + storew %.6985, %.6853 + %.6986 =w or %.6866, %.6985 + storew %.6986, %.6751 +@if_join.1423 +@for_cont.1407 + %.6987 =w loaduw %.4 + %.6988 =w add %.6987, 1 + storew %.6988, %.4 + jmp @for_cond.1405 +@for_join.1408 + %.6989 =w copy 0 + storeb %.6989, %.6 +@for_cond.1428 + %.6990 =w loadub %.6 + %.6991 =w extub %.6990 + %.6992 =w cslew %.6991, 3 + jnz %.6992, @for_body.1429, @for_join.1431 +@for_body.1429 + %.6995 =l loadl %.2 + ret %.6995 +@for_cont.1430 + %.6996 =w loadub %.6 + %.6997 =w extub %.6996 + %.6998 =w add %.6997, 1 + %.6999 =w copy %.6998 + storeb %.6999, %.6 + jmp @for_cond.1428 +@for_join.1431 + %.7000 =l loadl $g_173 + %.7001 =w loadsw %.7000 + %.7002 =l extsw %.7001 + %.7003 =w loadub %.6 + %.7004 =w extub %.7003 + %.7005 =w loaduh $g_425 + %.7006 =w extuh %.7005 + %.7007 =w copy 5 + %.7008 =l 
loadl %.2005 + storeb %.7007, %.7008 + %.7009 =w extsb %.7007 + %.7010 =w or %.7006, %.7009 + %.7011 =w or %.7004, %.7010 + %.7012 =w copy %.7011 + storeb %.7012, %.6 + %.7013 =l loadl %.2128 + %.7014 =l loadl %.7013 + %.7015 =w loadsw %.7014 + %.7016 =l copy $g_265 + %.7017 =l mul 40, 1 + %.7018 =l add %.7016, %.7017 + %.7019 =l copy %.7018 + %.7020 =w loadsw %.7019 + %.7021 =l copy $g_185 + %.7022 =l mul 32, 1 + %.7023 =l add %.7021, %.7022 + %.7024 =l copy %.7023 + %.7025 =w loaduw %.7024 + %.7026 =w copy %.7025 + %.7027 =w loaduw %.4 + %.7028 =l loadl $g_477 + %.7029 =l loadl %.6754 + %.7030 =l extsw 0 + %.7031 =w ceql %.7029, %.7030 + %.7032 =w xor %.7031, 18446744073709551615 + %.7033 =w loadsw %.6751 + %.7034 =w cnew %.7032, %.7033 + %.7035 =w cnew %.7034, 0 + jnz %.7035, @logic_right.1438, @logic_join.1439 +@logic_right.1438 + %.7036 =l copy $g_265 + %.7037 =l mul 16, 1 + %.7038 =l add %.7036, %.7037 + %.7039 =l copy %.7038 + %.7040 =w loadsw %.7039 + %.7041 =w cnew %.7040, 0 +@logic_join.1439 + %.7042 =w phi @for_join.1431 %.7035, @logic_right.1438 %.7041 + %.7043 =w copy %.7042 + %.7044 =w call $safe_mul_func_int8_t_s_s(w %.7026, w %.7043) + %.7045 =w extsb %.7044 + %.7046 =w cslew %.7020, %.7045 + %.7047 =l copy $g_265 + %.7048 =l mul 36, 1 + %.7049 =l add %.7047, %.7048 + %.7050 =l copy %.7049 + %.7051 =w loaduw %.7050 + %.7052 =w copy 0 + %.7053 =w ceqw %.7051, %.7052 + %.7054 =l extsw %.7053 + %.7055 =w csltl 661320705, %.7054 + %.7056 =w copy %.7055 + %.7057 =l extsw 0 + %.7058 =l sub %.7057, 1 + %.7059 =w copy %.7058 + %.7060 =w call $safe_add_func_uint8_t_u_u(w %.7056, w %.7059) + %.7061 =l extub %.7060 + %.7062 =l extsw 0 + %.7063 =l sub %.7062, 1 + %.7064 =w ceql %.7061, %.7063 + %.7065 =w cnew %.7064, 0 + jnz %.7065, @logic_join.1437, @logic_right.1436 +@logic_right.1436 + %.7066 =l loadl %.2128 + %.7067 =l loadl %.7066 + %.7068 =w loadsw %.7067 + %.7069 =w cnew %.7068, 0 +@logic_join.1437 + %.7070 =w phi @logic_join.1439 %.7065, 
@logic_right.1436 %.7069 + %.7071 =w copy %.7070 + %.7072 =l copy $g_518 + %.7073 =l mul 8, 1 + %.7074 =l add %.7072, %.7073 + %.7075 =l copy %.7074 + %.7076 =l loadl %.7075 + %.7077 =w copy %.7076 + %.7078 =w call $safe_sub_func_int8_t_s_s(w %.7071, w %.7077) + %.7079 =w extsb %.7078 + %.7080 =l extsw 0 + %.7081 =l mul %.7080, 4 + %.7082 =l add %.6750, %.7081 + %.7083 =w loadsw %.7082 + %.7084 =w cslew %.7079, %.7083 + %.7085 =w loaduw %.4 + %.7086 =l extuw %.7085 + %.7087 =l loadl $g_80 + %.7088 =w csltl %.7086, %.7087 + %.7089 =w cnew %.7088, 0 + jnz %.7089, @logic_join.1435, @logic_right.1434 +@logic_right.1434 + %.7090 =w loaduw %.4 + %.7091 =w cnew %.7090, 0 +@logic_join.1435 + %.7092 =w phi @logic_join.1437 %.7089, @logic_right.1434 %.7091 + %.7093 =w cnew %.7092, 0 + jnz %.7093, @logic_right.1432, @logic_join.1433 +@logic_right.1432 + %.7094 =w loaduw %.4 + %.7095 =w cnew %.7094, 0 +@logic_join.1433 + %.7096 =w phi @logic_join.1435 %.7093, @logic_right.1432 %.7095 + %.7097 =w copy %.7096 + %.7098 =w copy 2 + %.7099 =w call $safe_mul_func_int16_t_s_s(w %.7097, w %.7098) + %.7100 =w copy %.7099 + %.7101 =w copy 252 + %.7102 =w call $safe_mod_func_uint8_t_u_u(w %.7100, w %.7101) + %.7103 =w call $safe_add_func_uint8_t_u_u(w %.7012, w %.7102) + %.7104 =l or %.7002, 2129988974 + %.7105 =w copy %.7104 + storew %.7105, %.7000 +@if_join.1398 + %.7106 =w loaduw %.5892 + %.7107 =w add %.7106, 1 + storew %.7107, %.5892 +@for_cont.1336 + %.7108 =w loadub $g_46 + %.7109 =w add %.7108, 1 + storeb %.7109, $g_46 + jmp @for_cond.1334 +@for_join.1337 + jmp @if_join.1440 +@if_false.1243 + %.7111 =l add %.7110, 0 + %.7112 =l copy 0 + storel %.7112, %.7111 + %.7113 =l add %.7110, 8 + %.7114 =l copy 0 + storel %.7114, %.7113 + %.7115 =l add %.7110, 16 + %.7116 =l copy 0 + storel %.7116, %.7115 + %.7117 =l add %.7110, 24 + %.7118 =l copy 0 + storel %.7118, %.7117 + %.7119 =l add %.7110, 32 + %.7120 =l copy 0 + storel %.7120, %.7119 + %.7121 =l add %.7110, 40 + %.7122 =l copy 0 + 
storel %.7122, %.7121 + %.7123 =l add %.7110, 48 + %.7124 =l copy 0 + storel %.7124, %.7123 + %.7125 =l add %.7110, 56 + %.7126 =l copy 0 + storel %.7126, %.7125 + %.7128 =l add %.7127, 0 + %.7129 =l extsw 2 + %.7130 =l mul %.7129, 360 + %.7131 =l add %.250, %.7130 + %.7132 =l extsw 1 + %.7133 =l mul %.7132, 120 + %.7134 =l add %.7131, %.7133 + %.7135 =l extsw 1 + %.7136 =l mul %.7135, 20 + %.7137 =l add %.7134, %.7136 + %.7138 =l copy %.7137 + %.7139 =l mul 8, 1 + %.7140 =l add %.7138, %.7139 + %.7141 =l copy %.7140 + storel %.7141, %.7128 + %.7143 =l add %.7142, 0 + storel %.7127, %.7143 + %.7145 =l add %.7144, 0 + %.7146 =l copy 7 + storel %.7146, %.7145 + %.7148 =l add %.7147, 0 + storel $g_425, %.7148 + %.7149 =l add %.7147, 8 + storel $g_425, %.7149 + %.7150 =l add %.7147, 16 + storel $g_425, %.7150 + %.7151 =l add %.7147, 24 + storel $g_425, %.7151 + %.7153 =l add %.7152, 0 + %.7154 =w copy 2883204843 + storew %.7154, %.7153 + %.7156 =l add %.7155, 0 + %.7157 =l extsw 0 + %.7158 =l copy %.7157 + storel %.7158, %.7156 + %.7159 =l add %.7155, 8 + %.7160 =l extsw 0 + %.7161 =l copy %.7160 + storel %.7161, %.7159 + %.7162 =l add %.7155, 16 + storel $g_173, %.7162 + %.7163 =l add %.7155, 24 + %.7164 =l extsw 3 + %.7165 =l mul %.7164, 320 + %.7166 =l add %.7, %.7165 + %.7167 =l extsw 1 + %.7168 =l mul %.7167, 64 + %.7169 =l add %.7166, %.7168 + %.7170 =l extsw 4 + %.7171 =l mul %.7170, 8 + %.7172 =l add %.7169, %.7171 + storel %.7172, %.7163 + %.7173 =l add %.7155, 32 + storel %.2036, %.7173 + %.7174 =l add %.7155, 40 + %.7175 =l extsw 0 + %.7176 =l copy %.7175 + storel %.7176, %.7174 + %.7177 =l add %.7155, 48 + storel $g_23, %.7177 + %.7178 =l add %.7155, 56 + storel %.2036, %.7178 + %.7179 =l add %.7155, 64 + storel $g_173, %.7179 + %.7180 =l add %.7155, 72 + storel %.2036, %.7180 + %.7181 =l add %.7155, 80 + storel %.2036, %.7181 + %.7182 =l add %.7155, 88 + %.7183 =l extsw 3 + %.7184 =l mul %.7183, 320 + %.7185 =l add %.7, %.7184 + %.7186 =l extsw 3 + %.7187 
=l mul %.7186, 64 + %.7188 =l add %.7185, %.7187 + %.7189 =l extsw 1 + %.7190 =l mul %.7189, 8 + %.7191 =l add %.7188, %.7190 + storel %.7191, %.7182 + %.7192 =l add %.7155, 96 + %.7193 =l extsw 3 + %.7194 =l mul %.7193, 320 + %.7195 =l add %.7, %.7194 + %.7196 =l extsw 3 + %.7197 =l mul %.7196, 64 + %.7198 =l add %.7195, %.7197 + %.7199 =l extsw 1 + %.7200 =l mul %.7199, 8 + %.7201 =l add %.7198, %.7200 + storel %.7201, %.7192 + %.7202 =l add %.7155, 104 + %.7203 =l extsw 3 + %.7204 =l mul %.7203, 320 + %.7205 =l add %.7, %.7204 + %.7206 =l extsw 3 + %.7207 =l mul %.7206, 64 + %.7208 =l add %.7205, %.7207 + %.7209 =l extsw 1 + %.7210 =l mul %.7209, 8 + %.7211 =l add %.7208, %.7210 + storel %.7211, %.7202 + %.7212 =l add %.7155, 112 + %.7213 =l extsw 0 + %.7214 =l copy %.7213 + storel %.7214, %.7212 + %.7215 =l add %.7155, 120 + storel %.2036, %.7215 + %.7216 =l add %.7155, 128 + %.7217 =l extsw 0 + %.7218 =l copy %.7217 + storel %.7218, %.7216 + %.7219 =l add %.7155, 136 + storel %.2036, %.7219 + %.7220 =l add %.7155, 144 + %.7221 =l extsw 0 + %.7222 =l copy %.7221 + storel %.7222, %.7220 + %.7223 =l add %.7155, 152 + storel $g_23, %.7223 + %.7224 =l add %.7155, 160 + %.7225 =l extsw 0 + %.7226 =l copy %.7225 + storel %.7226, %.7224 + %.7227 =l add %.7155, 168 + storel $g_173, %.7227 + %.7228 =l add %.7155, 176 + storel $g_173, %.7228 + %.7229 =l add %.7155, 184 + storel %.2036, %.7229 + %.7230 =l add %.7155, 192 + storel $g_23, %.7230 + %.7231 =l add %.7155, 200 + storel $g_173, %.7231 + %.7232 =l add %.7155, 208 + storel $g_173, %.7232 + %.7233 =l add %.7155, 216 + storel %.2036, %.7233 + %.7234 =l add %.7155, 224 + %.7235 =l extsw 2 + %.7236 =l mul %.7235, 320 + %.7237 =l add %.7, %.7236 + %.7238 =l extsw 2 + %.7239 =l mul %.7238, 64 + %.7240 =l add %.7237, %.7239 + %.7241 =l extsw 0 + %.7242 =l mul %.7241, 8 + %.7243 =l add %.7240, %.7242 + storel %.7243, %.7234 + %.7244 =l add %.7155, 232 + storel $g_23, %.7244 + %.7245 =l add %.7155, 240 + storel %.2036, 
%.7245 + %.7246 =l add %.7155, 248 + %.7247 =l extsw 3 + %.7248 =l mul %.7247, 320 + %.7249 =l add %.7, %.7248 + %.7250 =l extsw 3 + %.7251 =l mul %.7250, 64 + %.7252 =l add %.7249, %.7251 + %.7253 =l extsw 1 + %.7254 =l mul %.7253, 8 + %.7255 =l add %.7252, %.7254 + storel %.7255, %.7246 + %.7256 =l add %.7155, 256 + %.7257 =l extsw 0 + %.7258 =l copy %.7257 + storel %.7258, %.7256 + %.7259 =l add %.7155, 264 + %.7260 =l extsw 0 + %.7261 =l copy %.7260 + storel %.7261, %.7259 + %.7262 =l add %.7155, 272 + %.7263 =l extsw 0 + %.7264 =l copy %.7263 + storel %.7264, %.7262 + %.7265 =l add %.7155, 280 + storel $g_173, %.7265 + %.7266 =l add %.7155, 288 + storel $g_23, %.7266 + %.7267 =l add %.7155, 296 + %.7268 =l extsw 3 + %.7269 =l mul %.7268, 320 + %.7270 =l add %.7, %.7269 + %.7271 =l extsw 1 + %.7272 =l mul %.7271, 64 + %.7273 =l add %.7270, %.7272 + %.7274 =l extsw 2 + %.7275 =l mul %.7274, 8 + %.7276 =l add %.7273, %.7275 + storel %.7276, %.7267 + %.7277 =l add %.7155, 304 + storel $g_23, %.7277 + %.7278 =l add %.7155, 312 + %.7279 =l extsw 3 + %.7280 =l mul %.7279, 320 + %.7281 =l add %.7, %.7280 + %.7282 =l extsw 1 + %.7283 =l mul %.7282, 64 + %.7284 =l add %.7281, %.7283 + %.7285 =l extsw 6 + %.7286 =l mul %.7285, 8 + %.7287 =l add %.7284, %.7286 + storel %.7287, %.7278 + %.7288 =l add %.7155, 320 + storel %.2036, %.7288 + %.7289 =l add %.7155, 328 + storel $g_23, %.7289 + %.7290 =l add %.7155, 336 + %.7291 =l extsw 0 + %.7292 =l copy %.7291 + storel %.7292, %.7290 + %.7293 =l add %.7155, 344 + %.7294 =l extsw 0 + %.7295 =l copy %.7294 + storel %.7295, %.7293 + %.7296 =l add %.7155, 352 + storel %.2036, %.7296 + %.7297 =l add %.7155, 360 + storel %.2036, %.7297 + %.7298 =l add %.7155, 368 + storel %.2036, %.7298 + %.7299 =l add %.7155, 376 + storel %.2036, %.7299 + %.7300 =l add %.7155, 384 + %.7301 =l extsw 0 + %.7302 =l copy %.7301 + storel %.7302, %.7300 + %.7303 =l add %.7155, 392 + storel %.2036, %.7303 + %.7304 =l add %.7155, 400 + %.7305 =l extsw 3 + 
%.7306 =l mul %.7305, 320 + %.7307 =l add %.7, %.7306 + %.7308 =l extsw 3 + %.7309 =l mul %.7308, 64 + %.7310 =l add %.7307, %.7309 + %.7311 =l extsw 1 + %.7312 =l mul %.7311, 8 + %.7313 =l add %.7310, %.7312 + storel %.7313, %.7304 + %.7314 =l add %.7155, 408 + storel $g_23, %.7314 + %.7315 =l add %.7155, 416 + storel %.2036, %.7315 + %.7316 =l add %.7155, 424 + %.7317 =l extsw 0 + %.7318 =l copy %.7317 + storel %.7318, %.7316 + %.7319 =l add %.7155, 432 + storel $g_173, %.7319 + %.7320 =l add %.7155, 440 + %.7321 =l extsw 0 + %.7322 =l mul %.7321, 320 + %.7323 =l add %.7, %.7322 + %.7324 =l extsw 0 + %.7325 =l mul %.7324, 64 + %.7326 =l add %.7323, %.7325 + %.7327 =l extsw 2 + %.7328 =l mul %.7327, 8 + %.7329 =l add %.7326, %.7328 + storel %.7329, %.7320 + %.7330 =l add %.7155, 448 + %.7331 =l extsw 0 + %.7332 =l copy %.7331 + storel %.7332, %.7330 + %.7333 =l add %.7155, 456 + %.7334 =l extsw 0 + %.7335 =l copy %.7334 + storel %.7335, %.7333 + %.7336 =l add %.7155, 464 + %.7337 =l extsw 0 + %.7338 =l copy %.7337 + storel %.7338, %.7336 + %.7339 =l add %.7155, 472 + storel $g_173, %.7339 + %.7340 =l add %.7155, 480 + storel %.2036, %.7340 + %.7341 =l add %.7155, 488 + %.7342 =l extsw 3 + %.7343 =l mul %.7342, 320 + %.7344 =l add %.7, %.7343 + %.7345 =l extsw 3 + %.7346 =l mul %.7345, 64 + %.7347 =l add %.7344, %.7346 + %.7348 =l extsw 1 + %.7349 =l mul %.7348, 8 + %.7350 =l add %.7347, %.7349 + storel %.7350, %.7341 + %.7351 =l add %.7155, 496 + %.7352 =l extsw 0 + %.7353 =l copy %.7352 + storel %.7353, %.7351 + %.7354 =l add %.7155, 504 + %.7355 =l extsw 3 + %.7356 =l mul %.7355, 320 + %.7357 =l add %.7, %.7356 + %.7358 =l extsw 3 + %.7359 =l mul %.7358, 64 + %.7360 =l add %.7357, %.7359 + %.7361 =l extsw 1 + %.7362 =l mul %.7361, 8 + %.7363 =l add %.7360, %.7362 + storel %.7363, %.7354 + %.7364 =l add %.7155, 512 + storel $g_23, %.7364 + %.7365 =l add %.7155, 520 + storel $g_173, %.7365 + %.7366 =l add %.7155, 528 + storel %.2036, %.7366 + %.7367 =l add %.7155, 
536 + storel $g_173, %.7367 + %.7368 =l add %.7155, 544 + storel %.2036, %.7368 + %.7369 =l add %.7155, 552 + %.7370 =l extsw 0 + %.7371 =l copy %.7370 + storel %.7371, %.7369 + %.7372 =l add %.7155, 560 + storel $g_173, %.7372 + %.7373 =l add %.7155, 568 + %.7374 =l extsw 0 + %.7375 =l copy %.7374 + storel %.7375, %.7373 + %.7376 =l add %.7155, 576 + %.7377 =l extsw 0 + %.7378 =l copy %.7377 + storel %.7378, %.7376 + %.7379 =l add %.7155, 584 + storel %.2036, %.7379 + %.7380 =l add %.7155, 592 + %.7381 =l extsw 0 + %.7382 =l copy %.7381 + storel %.7382, %.7380 + %.7383 =l add %.7155, 600 + %.7384 =l extsw 0 + %.7385 =l copy %.7384 + storel %.7385, %.7383 + %.7386 =l add %.7155, 608 + %.7387 =l extsw 0 + %.7388 =l copy %.7387 + storel %.7388, %.7386 + %.7389 =l add %.7155, 616 + storel $g_23, %.7389 + %.7390 =l add %.7155, 624 + %.7391 =l extsw 0 + %.7392 =l copy %.7391 + storel %.7392, %.7390 + %.7393 =l add %.7155, 632 + %.7394 =l extsw 0 + %.7395 =l copy %.7394 + storel %.7395, %.7393 + %.7396 =l add %.7155, 640 + storel $g_23, %.7396 + %.7397 =l add %.7155, 648 + storel $g_23, %.7397 + %.7398 =l add %.7155, 656 + storel $g_23, %.7398 + %.7399 =l add %.7155, 664 + %.7400 =l extsw 0 + %.7401 =l copy %.7400 + storel %.7401, %.7399 + %.7402 =l add %.7155, 672 + %.7403 =l extsw 3 + %.7404 =l mul %.7403, 320 + %.7405 =l add %.7, %.7404 + %.7406 =l extsw 3 + %.7407 =l mul %.7406, 64 + %.7408 =l add %.7405, %.7407 + %.7409 =l extsw 1 + %.7410 =l mul %.7409, 8 + %.7411 =l add %.7408, %.7410 + storel %.7411, %.7402 + %.7412 =l add %.7155, 680 + storel %.2036, %.7412 + %.7413 =l add %.7155, 688 + %.7414 =l extsw 0 + %.7415 =l copy %.7414 + storel %.7415, %.7413 + %.7416 =l add %.7155, 696 + %.7417 =l extsw 3 + %.7418 =l mul %.7417, 320 + %.7419 =l add %.7, %.7418 + %.7420 =l extsw 3 + %.7421 =l mul %.7420, 64 + %.7422 =l add %.7419, %.7421 + %.7423 =l extsw 1 + %.7424 =l mul %.7423, 8 + %.7425 =l add %.7422, %.7424 + storel %.7425, %.7416 + %.7426 =l add %.7155, 704 + 
storel $g_173, %.7426 + %.7427 =l add %.7155, 712 + %.7428 =l extsw 0 + %.7429 =l copy %.7428 + storel %.7429, %.7427 + %.7430 =l add %.7155, 720 + storel $g_173, %.7430 + %.7431 =l add %.7155, 728 + %.7432 =l extsw 3 + %.7433 =l mul %.7432, 320 + %.7434 =l add %.7, %.7433 + %.7435 =l extsw 1 + %.7436 =l mul %.7435, 64 + %.7437 =l add %.7434, %.7436 + %.7438 =l extsw 2 + %.7439 =l mul %.7438, 8 + %.7440 =l add %.7437, %.7439 + storel %.7440, %.7431 + %.7441 =l add %.7155, 736 + storel %.2036, %.7441 + %.7442 =l add %.7155, 744 + storel $g_23, %.7442 + %.7443 =l add %.7155, 752 + storel $g_23, %.7443 + %.7444 =l add %.7155, 760 + storel %.2036, %.7444 + %.7445 =l add %.7155, 768 + storel $g_23, %.7445 + %.7446 =l add %.7155, 776 + %.7447 =l extsw 0 + %.7448 =l copy %.7447 + storel %.7448, %.7446 + %.7449 =l add %.7155, 784 + %.7450 =l extsw 0 + %.7451 =l copy %.7450 + storel %.7451, %.7449 + %.7452 =l add %.7155, 792 + %.7453 =l extsw 0 + %.7454 =l copy %.7453 + storel %.7454, %.7452 + %.7455 =l add %.7155, 800 + storel $g_23, %.7455 + %.7456 =l add %.7155, 808 + storel %.2036, %.7456 + %.7457 =l add %.7155, 816 + %.7458 =l extsw 3 + %.7459 =l mul %.7458, 320 + %.7460 =l add %.7, %.7459 + %.7461 =l extsw 3 + %.7462 =l mul %.7461, 64 + %.7463 =l add %.7460, %.7462 + %.7464 =l extsw 1 + %.7465 =l mul %.7464, 8 + %.7466 =l add %.7463, %.7465 + storel %.7466, %.7457 + %.7467 =l add %.7155, 824 + storel %.2036, %.7467 + %.7468 =l add %.7155, 832 + %.7469 =l extsw 0 + %.7470 =l copy %.7469 + storel %.7470, %.7468 + %.7471 =l add %.7155, 840 + %.7472 =l extsw 0 + %.7473 =l copy %.7472 + storel %.7473, %.7471 + %.7474 =l add %.7155, 848 + storel %.2036, %.7474 + %.7475 =l add %.7155, 856 + storel $g_23, %.7475 + %.7476 =l add %.7155, 864 + storel %.2036, %.7476 + %.7477 =l add %.7155, 872 + storel $g_23, %.7477 + %.7478 =l add %.7155, 880 + storel %.2036, %.7478 + %.7479 =l add %.7155, 888 + %.7480 =l extsw 0 + %.7481 =l copy %.7480 + storel %.7481, %.7479 + %.7482 =l add 
%.7155, 896 + %.7483 =l extsw 0 + %.7484 =l copy %.7483 + storel %.7484, %.7482 + %.7485 =l add %.7155, 904 + %.7486 =l extsw 3 + %.7487 =l mul %.7486, 320 + %.7488 =l add %.7, %.7487 + %.7489 =l extsw 3 + %.7490 =l mul %.7489, 64 + %.7491 =l add %.7488, %.7490 + %.7492 =l extsw 1 + %.7493 =l mul %.7492, 8 + %.7494 =l add %.7491, %.7493 + storel %.7494, %.7485 + %.7495 =l add %.7155, 912 + %.7496 =l extsw 2 + %.7497 =l mul %.7496, 320 + %.7498 =l add %.7, %.7497 + %.7499 =l extsw 2 + %.7500 =l mul %.7499, 64 + %.7501 =l add %.7498, %.7500 + %.7502 =l extsw 0 + %.7503 =l mul %.7502, 8 + %.7504 =l add %.7501, %.7503 + storel %.7504, %.7495 + %.7505 =l add %.7155, 920 + %.7506 =l extsw 3 + %.7507 =l mul %.7506, 320 + %.7508 =l add %.7, %.7507 + %.7509 =l extsw 1 + %.7510 =l mul %.7509, 64 + %.7511 =l add %.7508, %.7510 + %.7512 =l extsw 4 + %.7513 =l mul %.7512, 8 + %.7514 =l add %.7511, %.7513 + storel %.7514, %.7505 + %.7515 =l add %.7155, 928 + storel $g_173, %.7515 + %.7516 =l add %.7155, 936 + storel %.2036, %.7516 + %.7517 =l add %.7155, 944 + %.7518 =l extsw 0 + %.7519 =l copy %.7518 + storel %.7519, %.7517 + %.7520 =l add %.7155, 952 + storel %.2036, %.7520 + %.7521 =l add %.7155, 960 + storel $g_23, %.7521 + %.7522 =l add %.7155, 968 + %.7523 =l extsw 3 + %.7524 =l mul %.7523, 320 + %.7525 =l add %.7, %.7524 + %.7526 =l extsw 3 + %.7527 =l mul %.7526, 64 + %.7528 =l add %.7525, %.7527 + %.7529 =l extsw 1 + %.7530 =l mul %.7529, 8 + %.7531 =l add %.7528, %.7530 + storel %.7531, %.7522 + %.7532 =l add %.7155, 976 + storel %.2036, %.7532 + %.7533 =l add %.7155, 984 + storel $g_23, %.7533 + %.7534 =l add %.7155, 992 + storel $g_23, %.7534 + %.7535 =l add %.7155, 1000 + %.7536 =l extsw 3 + %.7537 =l mul %.7536, 320 + %.7538 =l add %.7, %.7537 + %.7539 =l extsw 3 + %.7540 =l mul %.7539, 64 + %.7541 =l add %.7538, %.7540 + %.7542 =l extsw 1 + %.7543 =l mul %.7542, 8 + %.7544 =l add %.7541, %.7543 + storel %.7544, %.7535 + %.7545 =l add %.7155, 1008 + storel %.2036, 
%.7545 + %.7546 =l add %.7155, 1016 + storel $g_23, %.7546 + %.7547 =l add %.7155, 1024 + %.7548 =l extsw 0 + %.7549 =l copy %.7548 + storel %.7549, %.7547 + %.7550 =l add %.7155, 1032 + %.7551 =l extsw 3 + %.7552 =l mul %.7551, 320 + %.7553 =l add %.7, %.7552 + %.7554 =l extsw 3 + %.7555 =l mul %.7554, 64 + %.7556 =l add %.7553, %.7555 + %.7557 =l extsw 1 + %.7558 =l mul %.7557, 8 + %.7559 =l add %.7556, %.7558 + storel %.7559, %.7550 + %.7560 =l add %.7155, 1040 + %.7561 =l extsw 0 + %.7562 =l copy %.7561 + storel %.7562, %.7560 + %.7563 =l add %.7155, 1048 + storel %.2036, %.7563 + %.7564 =l add %.7155, 1056 + %.7565 =l extsw 0 + %.7566 =l copy %.7565 + storel %.7566, %.7564 + %.7567 =l add %.7155, 1064 + %.7568 =l extsw 3 + %.7569 =l mul %.7568, 320 + %.7570 =l add %.7, %.7569 + %.7571 =l extsw 3 + %.7572 =l mul %.7571, 64 + %.7573 =l add %.7570, %.7572 + %.7574 =l extsw 1 + %.7575 =l mul %.7574, 8 + %.7576 =l add %.7573, %.7575 + storel %.7576, %.7567 + %.7577 =l add %.7155, 1072 + %.7578 =l extsw 3 + %.7579 =l mul %.7578, 320 + %.7580 =l add %.7, %.7579 + %.7581 =l extsw 1 + %.7582 =l mul %.7581, 64 + %.7583 =l add %.7580, %.7582 + %.7584 =l extsw 6 + %.7585 =l mul %.7584, 8 + %.7586 =l add %.7583, %.7585 + storel %.7586, %.7577 + %.7587 =l add %.7155, 1080 + %.7588 =l extsw 0 + %.7589 =l copy %.7588 + storel %.7589, %.7587 + %.7590 =l add %.7155, 1088 + storel $g_23, %.7590 + %.7591 =l add %.7155, 1096 + storel %.2036, %.7591 + %.7592 =l add %.7155, 1104 + storel %.2036, %.7592 + %.7593 =l add %.7155, 1112 + storel $g_23, %.7593 + %.7594 =l add %.7155, 1120 + %.7595 =l extsw 3 + %.7596 =l mul %.7595, 320 + %.7597 =l add %.7, %.7596 + %.7598 =l extsw 3 + %.7599 =l mul %.7598, 64 + %.7600 =l add %.7597, %.7599 + %.7601 =l extsw 1 + %.7602 =l mul %.7601, 8 + %.7603 =l add %.7600, %.7602 + storel %.7603, %.7594 + %.7604 =l add %.7155, 1128 + %.7605 =l extsw 0 + %.7606 =l copy %.7605 + storel %.7606, %.7604 + %.7607 =l add %.7155, 1136 + %.7608 =l extsw 3 + 
%.7609 =l mul %.7608, 320 + %.7610 =l add %.7, %.7609 + %.7611 =l extsw 3 + %.7612 =l mul %.7611, 64 + %.7613 =l add %.7610, %.7612 + %.7614 =l extsw 1 + %.7615 =l mul %.7614, 8 + %.7616 =l add %.7613, %.7615 + storel %.7616, %.7607 + %.7617 =l add %.7155, 1144 + storel $g_23, %.7617 + %.7618 =l add %.7155, 1152 + storel $g_173, %.7618 + %.7619 =l add %.7155, 1160 + storel %.2036, %.7619 + %.7620 =l add %.7155, 1168 + %.7621 =l extsw 0 + %.7622 =l copy %.7621 + storel %.7622, %.7620 + %.7623 =l add %.7155, 1176 + storel %.2036, %.7623 + %.7624 =l add %.7155, 1184 + storel %.2036, %.7624 + %.7625 =l add %.7155, 1192 + storel $g_173, %.7625 + %.7626 =l add %.7155, 1200 + storel %.2036, %.7626 + %.7627 =l add %.7155, 1208 + %.7628 =l extsw 3 + %.7629 =l mul %.7628, 320 + %.7630 =l add %.7, %.7629 + %.7631 =l extsw 1 + %.7632 =l mul %.7631, 64 + %.7633 =l add %.7630, %.7632 + %.7634 =l extsw 6 + %.7635 =l mul %.7634, 8 + %.7636 =l add %.7633, %.7635 + storel %.7636, %.7627 + %.7637 =l add %.7155, 1216 + storel %.2036, %.7637 + %.7638 =l add %.7155, 1224 + storel %.2036, %.7638 + %.7639 =l add %.7155, 1232 + %.7640 =l extsw 2 + %.7641 =l mul %.7640, 320 + %.7642 =l add %.7, %.7641 + %.7643 =l extsw 2 + %.7644 =l mul %.7643, 64 + %.7645 =l add %.7642, %.7644 + %.7646 =l extsw 0 + %.7647 =l mul %.7646, 8 + %.7648 =l add %.7645, %.7647 + storel %.7648, %.7639 + %.7649 =l add %.7155, 1240 + %.7650 =l extsw 0 + %.7651 =l copy %.7650 + storel %.7651, %.7649 + %.7652 =l add %.7155, 1248 + %.7653 =l extsw 0 + %.7654 =l copy %.7653 + storel %.7654, %.7652 + %.7655 =l add %.7155, 1256 + storel $g_173, %.7655 + %.7656 =l add %.7155, 1264 + storel %.2036, %.7656 + %.7657 =l add %.7155, 1272 + storel %.2036, %.7657 + %.7658 =l add %.7155, 1280 + %.7659 =l extsw 0 + %.7660 =l copy %.7659 + storel %.7660, %.7658 + %.7661 =l add %.7155, 1288 + storel $g_23, %.7661 + %.7662 =l add %.7155, 1296 + %.7663 =l extsw 4 + %.7664 =l mul %.7663, 320 + %.7665 =l add %.7, %.7664 + %.7666 =l extsw 
0 + %.7667 =l mul %.7666, 64 + %.7668 =l add %.7665, %.7667 + %.7669 =l extsw 3 + %.7670 =l mul %.7669, 8 + %.7671 =l add %.7668, %.7670 + storel %.7671, %.7662 + %.7672 =l add %.7155, 1304 + storel $g_23, %.7672 + %.7673 =l add %.7155, 1312 + storel $g_23, %.7673 + %.7674 =l add %.7155, 1320 + %.7675 =l extsw 3 + %.7676 =l mul %.7675, 320 + %.7677 =l add %.7, %.7676 + %.7678 =l extsw 3 + %.7679 =l mul %.7678, 64 + %.7680 =l add %.7677, %.7679 + %.7681 =l extsw 1 + %.7682 =l mul %.7681, 8 + %.7683 =l add %.7680, %.7682 + storel %.7683, %.7674 + %.7684 =l add %.7155, 1328 + storel %.2036, %.7684 + %.7685 =l add %.7155, 1336 + storel $g_173, %.7685 + %.7686 =l add %.7155, 1344 + storel %.2036, %.7686 + %.7687 =l add %.7155, 1352 + %.7688 =l extsw 0 + %.7689 =l copy %.7688 + storel %.7689, %.7687 + %.7690 =l add %.7155, 1360 + %.7691 =l extsw 3 + %.7692 =l mul %.7691, 320 + %.7693 =l add %.7, %.7692 + %.7694 =l extsw 3 + %.7695 =l mul %.7694, 64 + %.7696 =l add %.7693, %.7695 + %.7697 =l extsw 1 + %.7698 =l mul %.7697, 8 + %.7699 =l add %.7696, %.7698 + storel %.7699, %.7690 + %.7700 =l add %.7155, 1368 + %.7701 =l extsw 3 + %.7702 =l mul %.7701, 320 + %.7703 =l add %.7, %.7702 + %.7704 =l extsw 3 + %.7705 =l mul %.7704, 64 + %.7706 =l add %.7703, %.7705 + %.7707 =l extsw 1 + %.7708 =l mul %.7707, 8 + %.7709 =l add %.7706, %.7708 + storel %.7709, %.7700 + %.7710 =l add %.7155, 1376 + %.7711 =l extsw 0 + %.7712 =l copy %.7711 + storel %.7712, %.7710 + %.7713 =l add %.7155, 1384 + storel %.2036, %.7713 + %.7714 =l add %.7155, 1392 + %.7715 =l extsw 3 + %.7716 =l mul %.7715, 320 + %.7717 =l add %.7, %.7716 + %.7718 =l extsw 3 + %.7719 =l mul %.7718, 64 + %.7720 =l add %.7717, %.7719 + %.7721 =l extsw 1 + %.7722 =l mul %.7721, 8 + %.7723 =l add %.7720, %.7722 + storel %.7723, %.7714 + %.7724 =l add %.7155, 1400 + storel %.2036, %.7724 + %.7725 =l add %.7155, 1408 + storel %.2036, %.7725 + %.7726 =l add %.7155, 1416 + storel $g_23, %.7726 + %.7727 =l add %.7155, 1424 + 
storel %.2036, %.7727 + %.7728 =l add %.7155, 1432 + storel $g_23, %.7728 + %.7729 =l add %.7155, 1440 + storel $g_23, %.7729 + %.7730 =l add %.7155, 1448 + %.7731 =l extsw 3 + %.7732 =l mul %.7731, 320 + %.7733 =l add %.7, %.7732 + %.7734 =l extsw 1 + %.7735 =l mul %.7734, 64 + %.7736 =l add %.7733, %.7735 + %.7737 =l extsw 2 + %.7738 =l mul %.7737, 8 + %.7739 =l add %.7736, %.7738 + storel %.7739, %.7730 + %.7740 =l add %.7155, 1456 + storel $g_173, %.7740 + %.7741 =l add %.7155, 1464 + storel %.2036, %.7741 + %.7742 =l add %.7155, 1472 + %.7743 =l extsw 0 + %.7744 =l copy %.7743 + storel %.7744, %.7742 + %.7745 =l add %.7155, 1480 + storel $g_173, %.7745 + %.7746 =l add %.7155, 1488 + storel $g_23, %.7746 + %.7747 =l add %.7155, 1496 + storel $g_173, %.7747 + %.7748 =l add %.7155, 1504 + storel $g_23, %.7748 + %.7749 =l add %.7155, 1512 + %.7750 =l extsw 3 + %.7751 =l mul %.7750, 320 + %.7752 =l add %.7, %.7751 + %.7753 =l extsw 1 + %.7754 =l mul %.7753, 64 + %.7755 =l add %.7752, %.7754 + %.7756 =l extsw 2 + %.7757 =l mul %.7756, 8 + %.7758 =l add %.7755, %.7757 + storel %.7758, %.7749 + %.7759 =l add %.7155, 1520 + storel %.2036, %.7759 + %.7760 =l add %.7155, 1528 + storel $g_23, %.7760 + %.7761 =l add %.7155, 1536 + storel $g_23, %.7761 + %.7762 =l add %.7155, 1544 + storel $g_23, %.7762 + %.7763 =l add %.7155, 1552 + storel %.2036, %.7763 + %.7764 =l add %.7155, 1560 + %.7765 =l extsw 0 + %.7766 =l mul %.7765, 320 + %.7767 =l add %.7, %.7766 + %.7768 =l extsw 0 + %.7769 =l mul %.7768, 64 + %.7770 =l add %.7767, %.7769 + %.7771 =l extsw 2 + %.7772 =l mul %.7771, 8 + %.7773 =l add %.7770, %.7772 + storel %.7773, %.7764 + %.7774 =l add %.7155, 1568 + %.7775 =l extsw 0 + %.7776 =l copy %.7775 + storel %.7776, %.7774 + %.7777 =l add %.7155, 1576 + %.7778 =l extsw 0 + %.7779 =l copy %.7778 + storel %.7779, %.7777 + %.7780 =l add %.7155, 1584 + storel %.2036, %.7780 + %.7781 =l add %.7155, 1592 + %.7782 =l extsw 0 + %.7783 =l copy %.7782 + storel %.7783, %.7781 + 
%.7784 =l add %.7155, 1600 + %.7785 =l extsw 0 + %.7786 =l copy %.7785 + storel %.7786, %.7784 + %.7787 =l add %.7155, 1608 + %.7788 =l extsw 0 + %.7789 =l copy %.7788 + storel %.7789, %.7787 + %.7790 =l add %.7155, 1616 + storel %.2036, %.7790 + %.7791 =l add %.7155, 1624 + %.7792 =l extsw 4 + %.7793 =l mul %.7792, 320 + %.7794 =l add %.7, %.7793 + %.7795 =l extsw 0 + %.7796 =l mul %.7795, 64 + %.7797 =l add %.7794, %.7796 + %.7798 =l extsw 3 + %.7799 =l mul %.7798, 8 + %.7800 =l add %.7797, %.7799 + storel %.7800, %.7791 + %.7801 =l add %.7155, 1632 + %.7802 =l extsw 4 + %.7803 =l mul %.7802, 320 + %.7804 =l add %.7, %.7803 + %.7805 =l extsw 0 + %.7806 =l mul %.7805, 64 + %.7807 =l add %.7804, %.7806 + %.7808 =l extsw 3 + %.7809 =l mul %.7808, 8 + %.7810 =l add %.7807, %.7809 + storel %.7810, %.7801 + %.7811 =l add %.7155, 1640 + storel %.2036, %.7811 + %.7812 =l add %.7155, 1648 + storel $g_173, %.7812 + %.7813 =l add %.7155, 1656 + %.7814 =l extsw 0 + %.7815 =l mul %.7814, 320 + %.7816 =l add %.7, %.7815 + %.7817 =l extsw 0 + %.7818 =l mul %.7817, 64 + %.7819 =l add %.7816, %.7818 + %.7820 =l extsw 2 + %.7821 =l mul %.7820, 8 + %.7822 =l add %.7819, %.7821 + storel %.7822, %.7813 + %.7823 =l add %.7155, 1664 + storel %.2036, %.7823 + %.7824 =l add %.7155, 1672 + storel $g_23, %.7824 + %.7826 =l add %.7825, 0 + storel $g_130, %.7826 + %.7830 =l extsw 1 + %.7831 =l mul %.7830, 8 + %.7832 =l add %.7110, %.7831 + %.7833 =l loadl %.7832 + %.7834 =w copy %.7833 + %.7835 =l loadl $g_296 + %.7836 =l loadl %.7835 + %.7837 =w loaduw %.4 + %.7838 =l loadl %.2026 + %.7839 =l extsw 1 + %.7840 =l mul %.7839, 8 + %.7841 =l add %.7110, %.7840 + %.7842 =w ceql %.7838, %.7841 + %.7843 =l loadl %.7142 + storel $g_81, %.7843 + %.7844 =w cnel $g_81, $g_81 + %.7845 =l loadl %.2036 + %.7846 =w loadsw %.7845 + %.7847 =w cslew %.7844, %.7846 + %.7848 =l extsw %.7847 + %.7849 =l loadl %.7144 + %.7850 =l xor %.7848, %.7849 + %.7851 =w copy %.7850 + %.7852 =l copy $g_265 + %.7853 =l mul 
8, 1 + %.7854 =l add %.7852, %.7853 + %.7855 =l copy %.7854 + %.7856 =l loadl %.7855 + %.7857 =w copy %.7856 + %.7858 =w call $safe_mod_func_int16_t_s_s(w %.7851, w %.7857) + %.7859 =l extsh %.7858 + %.7860 =w cnel %.7859, 842 + %.7861 =l extsw %.7860 + %.7862 =w csgtl %.7861, 40679 + %.7863 =l extsw %.7862 + %.7864 =l and %.7863, 0 + %.7865 =w ceql %.7864, 34194 + %.7866 =w csgew %.7842, %.7865 + %.7867 =w cnel %.2030, %.2030 + %.7868 =w cnew %.7867, 0 + jnz %.7868, @logic_join.1442, @logic_right.1441 +@logic_right.1441 + %.7869 =l extsw 0 + %.7870 =l sub %.7869, 5 + %.7871 =w cnel %.7870, 0 +@logic_join.1442 + %.7872 =w phi @if_false.1243 %.7868, @logic_right.1441 %.7871 + %.7873 =l extsw 2 + %.7874 =l mul %.7873, 360 + %.7875 =l add %.250, %.7874 + %.7876 =l extsw 1 + %.7877 =l mul %.7876, 120 + %.7878 =l add %.7875, %.7877 + %.7879 =l extsw 1 + %.7880 =l mul %.7879, 20 + %.7881 =l add %.7878, %.7880 + %.7882 =w loaduw $g_115 + %.7883 =w copy %.7882 + %.7884 =w call $safe_mul_func_int16_t_s_s(w %.7834, w %.7883) + %.7885 =w extsh %.7884 + storew %.7885, %.7152 + %.7886 =l extsw %.7885 + %.7887 =w cslel %.7886, 49357 + %.7888 =l extsw 0 + %.7889 =l copy %.7888 + storel %.7889, %.2 + %.7890 =l copy $g_518 + %.7891 =l mul 24, 1 + %.7892 =l add %.7890, %.7891 + %.7893 =l copy %.7892 + %.7894 =l loadl %.7893 + %.7895 =w cnel %.7894, 0 + jnz %.7895, @if_true.1443, @if_false.1444 +@if_true.1443 + jmp @lbl_640.1445 +@if_false.1444 +@lbl_640.1445 + %.7896 =w copy 3 + storeb %.7896, %.2042 +@for_cond.1446 + %.7897 =w loadsb %.2042 + %.7898 =w extsb %.7897 + %.7899 =w csgew %.7898, 1 + jnz %.7899, @for_body.1447, @for_join.1449 +@for_body.1447 + %.7901 =l add %.7900, 0 + storel $g_185, %.7901 + %.7903 =l add %.7902, 0 + %.7904 =w copy 7 + storew %.7904, %.7903 + %.7906 =l add %.7905, 0 + %.7907 =w copy 132 + storeb %.7907, %.7906 + %.7908 =l extsw 2 + %.7909 =l mul %.7908, 360 + %.7910 =l add %.250, %.7909 + %.7911 =l extsw 1 + %.7912 =l mul %.7911, 120 + %.7913 =l add 
%.7910, %.7912 + %.7914 =l extsw 1 + %.7915 =l mul %.7914, 20 + %.7916 =l add %.7913, %.7915 + storel %.7916, %.2045 + %.7917 =w copy 0 + %.7918 =l copy $g_265 + %.7919 =l mul 32, 1 + %.7920 =l add %.7918, %.7919 + %.7921 =l copy %.7920 + storew %.7917, %.7921 +@for_cond.1450 + %.7922 =l copy $g_265 + %.7923 =l mul 32, 1 + %.7924 =l add %.7922, %.7923 + %.7925 =l copy %.7924 + %.7926 =w loaduw %.7925 + %.7927 =w copy 4 + %.7928 =w culew %.7926, %.7927 + jnz %.7928, @for_body.1451, @for_join.1453 +@for_body.1451 + %.7930 =l add %.7929, 0 + %.7931 =l extsw 3 + %.7932 =l mul %.7931, 8 + %.7933 =l add %.7110, %.7932 + storel %.7933, %.7930 + %.7934 =l add %.7929, 8 + %.7935 =l copy $g_185 + %.7936 =l mul 8, 1 + %.7937 =l add %.7935, %.7936 + %.7938 =l copy %.7937 + storel %.7938, %.7934 + %.7939 =l add %.7929, 16 + %.7940 =l copy $g_185 + %.7941 =l mul 8, 1 + %.7942 =l add %.7940, %.7941 + %.7943 =l copy %.7942 + storel %.7943, %.7939 + %.7944 =l add %.7929, 24 + %.7945 =l extsw 3 + %.7946 =l mul %.7945, 8 + %.7947 =l add %.7110, %.7946 + storel %.7947, %.7944 + %.7948 =l add %.7929, 32 + %.7949 =l copy $g_185 + %.7950 =l mul 8, 1 + %.7951 =l add %.7949, %.7950 + %.7952 =l copy %.7951 + storel %.7952, %.7948 + %.7953 =l add %.7929, 40 + %.7954 =l copy $g_185 + %.7955 =l mul 8, 1 + %.7956 =l add %.7954, %.7955 + %.7957 =l copy %.7956 + storel %.7957, %.7953 + %.7959 =l add %.7958, 0 + %.7960 =w copy 8 + storew %.7960, %.7959 + %.7962 =l loadl $g_82 + %.7963 =w loadub %.6 + %.7964 =l extub %.7963 + storel %.7964, $g_80 + %.7965 =l copy %.7964 + %.7966 =l or 14975625374428453295, %.7965 + %.7967 =w loadub %.6 + %.7968 =l extub %.7967 + %.7969 =l and %.7962, %.7968 + %.7970 =l copy $g_518 + %.7971 =l mul 24, 1 + %.7972 =l add %.7970, %.7971 + %.7973 =l copy %.7972 + %.7974 =l loadl %.7973 + %.7975 =w copy 37368 + %.7976 =w call $safe_rshift_func_uint16_t_u_s(w %.7975, w 2) + %.7977 =w extuh %.7976 + %.7978 =l loadl %.2005 + %.7979 =w loadsb %.7978 + %.7980 =w extsb %.7979 
+ %.7981 =w loadub %.6 + %.7982 =w extub %.7981 + %.7983 =w copy 46361 + %.7984 =l copy $g_518 + %.7985 =l mul 24, 1 + %.7986 =l add %.7984, %.7985 + %.7987 =l copy %.7986 + %.7988 =l loadl %.7987 + %.7989 =l copy 2 + %.7990 =l and %.7988, %.7989 + %.7991 =w copy %.7990 + %.7992 =w call $safe_sub_func_int16_t_s_s(w %.7983, w %.7991) + %.7993 =w extsh %.7992 + %.7994 =w cnew %.7982, %.7993 + %.7995 =w or %.7980, %.7994 + %.7996 =w copy %.7995 + storeb %.7996, %.7978 + %.7997 =w extsb %.7996 + %.7998 =w ceqw %.7977, %.7997 + %.7999 =w cnew %.7998, 0 + jnz %.7999, @logic_join.1455, @logic_right.1454 +@logic_right.1454 + %.8000 =l loadl $g_422 + %.8001 =w loaduw %.8000 + %.8002 =w cnew %.8001, 0 +@logic_join.1455 + %.8003 =w phi @for_body.1451 %.7999, @logic_right.1454 %.8002 + %.8004 =w copy %.8003 + %.8005 =w copy 7 + %.8006 =w call $safe_lshift_func_uint8_t_u_u(w %.8004, w %.8005) + %.8007 =w extub %.8006 + storew %.8007, %.7958 + %.8008 =l copy $g_185 + %.8009 =l mul 48, 1 + %.8010 =l add %.8008, %.8009 + %.8011 =l copy %.8010 + storew 0, %.8011 +@for_cond.1456 + %.8012 =l copy $g_185 + %.8013 =l mul 48, 1 + %.8014 =l add %.8012, %.8013 + %.8015 =l copy %.8014 + %.8016 =w loadsw %.8015 + %.8017 =w cslew %.8016, 4 + jnz %.8017, @for_body.1457, @for_join.1459 +@for_body.1457 + %.8019 =l add %.8018, 0 + storel $g_629, %.8019 + %.8021 =l add %.8020, 0 + storel %.2042, %.8021 + %.8022 =l add %.8020, 8 + storel $g_631, %.8022 + %.8023 =l add %.8020, 16 + storel %.2042, %.8023 + %.8024 =l add %.8020, 24 + %.8025 =l extsw 0 + %.8026 =l copy %.8025 + storel %.8026, %.8024 + %.8027 =l add %.8020, 32 + storel %.2042, %.8027 + %.8028 =l add %.8020, 40 + storel $g_631, %.8028 + %.8029 =l add %.8020, 48 + %.8030 =l extsw 0 + %.8031 =l copy %.8030 + storel %.8031, %.8029 + %.8032 =l add %.8020, 56 + storel $g_631, %.8032 + %.8033 =l add %.8020, 64 + %.8034 =l extsw 0 + %.8035 =l copy %.8034 + storel %.8035, %.8033 + %.8036 =l add %.8020, 72 + storel $g_631, %.8036 + %.8037 =l add 
%.8020, 80 + storel $g_2, %.8037 + %.8038 =l add %.8020, 88 + storel $g_631, %.8038 + %.8039 =l add %.8020, 96 + %.8040 =l extsw 0 + %.8041 =l copy %.8040 + storel %.8041, %.8039 + %.8042 =l add %.8020, 104 + storel $g_631, %.8042 + %.8043 =l add %.8020, 112 + storel %.2042, %.8043 + %.8044 =l add %.8020, 120 + storel %.2042, %.8044 + %.8045 =l add %.8020, 128 + storel %.2042, %.8045 + %.8046 =l add %.8020, 136 + storel $g_631, %.8046 + %.8047 =l add %.8020, 144 + storel $g_631, %.8047 + %.8048 =l add %.8020, 152 + storel $g_631, %.8048 + %.8049 =l add %.8020, 160 + %.8050 =l extsw 0 + %.8051 =l copy %.8050 + storel %.8051, %.8049 + %.8052 =l add %.8020, 168 + %.8053 =l extsw 0 + %.8054 =l copy %.8053 + storel %.8054, %.8052 + %.8055 =l add %.8020, 176 + storel $g_631, %.8055 + %.8056 =l add %.8020, 184 + storel $g_631, %.8056 + %.8057 =l add %.8020, 192 + storel %.2042, %.8057 + %.8058 =l add %.8020, 200 + storel %.2042, %.8058 + %.8059 =l add %.8020, 208 + storel $g_631, %.8059 + %.8060 =l add %.8020, 216 + storel %.2042, %.8060 + %.8061 =l add %.8020, 224 + storel %.2042, %.8061 + %.8062 =l add %.8020, 232 + storel %.2042, %.8062 + %.8063 =l add %.8020, 240 + storel %.2042, %.8063 + %.8064 =l add %.8020, 248 + storel %.2042, %.8064 + %.8066 =l add %.8065, 0 + %.8067 =w copy 0 + storew %.8067, %.8066 + %.8071 =w copy 0 + storeh %.8071, $g_425 +@for_cond.1460 + %.8072 =w loaduh $g_425 + %.8073 =w extuh %.8072 + %.8074 =w cslew %.8073, 7 + jnz %.8074, @for_body.1461, @for_join.1463 +@for_body.1461 + %.8078 =w loadsb %.2042 + %.8079 =w extsb %.8078 + %.8080 =w add %.8079, 1 + %.8081 =l extsw %.8080 + %.8082 =l mul %.8081, 320 + %.8083 =l add %.7, %.8082 + %.8084 =w loadsb %.2042 + %.8085 =l extsb %.8084 + %.8086 =l mul %.8085, 64 + %.8087 =l add %.8083, %.8086 + %.8088 =w loadsb %.2042 + %.8089 =l extsb %.8088 + %.8090 =l mul %.8089, 8 + %.8091 =l add %.8087, %.8090 + %.8092 =l loadl %.8091 + %.8093 =w loadsb %.2042 + %.8094 =w extsb %.8093 + %.8095 =w add %.8094, 2 
+ %.8096 =l extsw %.8095 + %.8097 =l mul %.8096, 320 + %.8098 =l add %.7, %.8097 + %.8099 =l copy $g_265 + %.8100 =l mul 32, 1 + %.8101 =l add %.8099, %.8100 + %.8102 =l copy %.8101 + %.8103 =w loaduw %.8102 + %.8104 =l extuw %.8103 + %.8105 =l mul %.8104, 64 + %.8106 =l add %.8098, %.8105 + %.8107 =w loadsb %.2042 + %.8108 =w extsb %.8107 + %.8109 =w add %.8108, 1 + %.8110 =l extsw %.8109 + %.8111 =l mul %.8110, 8 + %.8112 =l add %.8106, %.8111 + storel %.8092, %.8112 + %.8113 =l loadl %.7900 + %.8114 =l loadl $g_296 + storel %.8113, %.8114 +@for_cont.1462 + %.8115 =w loaduh $g_425 + %.8116 =w extuh %.8115 + %.8117 =w add %.8116, 1 + %.8118 =w copy %.8117 + storeh %.8118, $g_425 + jmp @for_cond.1460 +@for_join.1463 + %.8119 =w loaduw %.4 + %.8120 =l extuw %.8119 + %.8121 =w loadub %.6 + %.8122 =w extub %.8121 + %.8123 =w loaduh $g_619 + %.8124 =w extuh %.8123 + %.8125 =w cnew %.8124, 0 + jnz %.8125, @logic_right.1464, @logic_join.1465 +@logic_right.1464 + %.8126 =w copy 8 + %.8127 =l loadl %.7825 + %.8128 =l extsw 0 + %.8129 =w ceql %.8127, %.8128 + %.8130 =w call $safe_rshift_func_uint16_t_u_s(w %.8126, w %.8129) + %.8131 =w extuh %.8130 + %.8132 =w ceql $g_201, %.7900 + %.8133 =w copy %.8132 + %.8134 =l loadl %.2005 + storeb %.8133, %.8134 + %.8135 =w call $safe_lshift_func_int8_t_s_s(w %.8133, w 3) + %.8136 =w copy %.8135 + %.8137 =w loadsw %.7958 + %.8138 =w copy %.8137 + %.8139 =w call $safe_div_func_uint8_t_u_u(w %.8136, w %.8138) + %.8140 =w extub %.8139 + %.8141 =w xor %.8140, 18446744073709551615 + %.8142 =w and %.8131, %.8141 + %.8143 =w cnew %.8142, 0 +@logic_join.1465 + %.8144 =w phi @for_join.1463 %.8125, @logic_right.1464 %.8143 + %.8145 =l extsw %.8144 + %.8146 =l or %.8145, 98 + %.8147 =w copy %.8146 + %.8148 =l loadl %.8018 + storeb %.8147, %.8148 + %.8149 =w extsb %.8147 + storew %.8149, %.7902 + %.8150 =w copy %.8149 + %.8151 =w loaduw %.4 + %.8152 =w cultw %.8150, %.8151 + %.8153 =l extsw %.8152 + %.8154 =l copy 0 + %.8155 =l call 
$safe_sub_func_int64_t_s_s(l %.8153, l %.8154) + %.8156 =w loadub %.6 + %.8157 =l extub %.8156 + %.8158 =l call $safe_unary_minus_func_int64_t_s(l %.8157) + %.8159 =w loadub %.6 + %.8160 =l extub %.8159 + %.8161 =w csgtl %.8158, %.8160 + %.8162 =w call $safe_lshift_func_uint16_t_u_s(w %.8122, w %.8161) + %.8163 =l copy $g_265 + %.8164 =l mul 44, 1 + %.8165 =l add %.8163, %.8164 + %.8166 =l copy %.8165 + %.8167 =w loadsw %.8166 + %.8168 =l copy 1 + %.8169 =w loaduw %.4 + %.8170 =l extuw %.8169 + %.8171 =l call $safe_add_func_int64_t_s_s(l %.8168, l %.8170) + %.8172 =w loadsw %.8065 + %.8173 =l extsw %.8172 + %.8174 =l or %.8171, %.8173 + %.8175 =w copy %.8174 + %.8176 =w copy 50742 + %.8177 =w call $safe_mul_func_uint16_t_u_u(w %.8175, w %.8176) + %.8178 =l copy $g_265 + %.8179 =l mul 24, 1 + %.8180 =l add %.8178, %.8179 + %.8181 =l copy %.8180 + %.8182 =l loadl %.8181 + %.8183 =w cnel %.8120, %.8182 + %.8184 =l extsw %.8183 + %.8185 =l xor %.8184, 65535 + %.8186 =w cnel %.8185, 0 + jnz %.8186, @if_true.1466, @if_false.1467 +@if_true.1466 + %.8188 =l add %.8187, 0 + storel $g_634, %.8188 + %.8192 =l copy $g_265 + %.8193 =l mul 32, 1 + %.8194 =l add %.8192, %.8193 + %.8195 =l copy %.8194 + %.8196 =w loaduw %.8195 + %.8197 =w loaduw %.4 + %.8198 =l loadl $g_634 + %.8199 =l loadl %.8187 + storel %.8198, %.8199 + %.8200 =l copy $g_265 + %.8201 =l mul 32, 1 + %.8202 =l add %.8200, %.8201 + %.8203 =l copy %.8202 + %.8204 =w loaduw %.8203 + %.8205 =l extuw %.8204 + %.8206 =l mul %.8205, 320 + %.8207 =l add %.7, %.8206 + %.8208 =w loadsb %.2042 + %.8209 =w extsb %.8208 + %.8210 =w add %.8209, 1 + %.8211 =l extsw %.8210 + %.8212 =l mul %.8211, 64 + %.8213 =l add %.8207, %.8212 + %.8214 =l copy $g_185 + %.8215 =l mul 48, 1 + %.8216 =l add %.8214, %.8215 + %.8217 =l copy %.8216 + %.8218 =w loadsw %.8217 + %.8219 =l extsw %.8218 + %.8220 =l mul %.8219, 8 + %.8221 =l add %.8213, %.8220 + %.8222 =l loadl %.8221 + %.8223 =l copy $g_265 + %.8224 =l mul 32, 1 + %.8225 =l add %.8223, 
%.8224 + %.8226 =l copy %.8225 + %.8227 =w loaduw %.8226 + %.8228 =w copy 1 + %.8229 =w add %.8227, %.8228 + %.8230 =l extuw %.8229 + %.8231 =l mul %.8230, 320 + %.8232 =l add %.7, %.8231 + %.8233 =w loadsb %.2042 + %.8234 =w extsb %.8233 + %.8235 =w add %.8234, 1 + %.8236 =l extsw %.8235 + %.8237 =l mul %.8236, 64 + %.8238 =l add %.8232, %.8237 + %.8239 =l copy $g_265 + %.8240 =l mul 32, 1 + %.8241 =l add %.8239, %.8240 + %.8242 =l copy %.8241 + %.8243 =w loaduw %.8242 + %.8244 =l extuw %.8243 + %.8245 =l mul %.8244, 8 + %.8246 =l add %.8238, %.8245 + storel %.8222, %.8246 + jmp @if_join.1468 +@if_false.1467 + %.8248 =l add %.8247, 0 + %.8249 =w copy 0 + storew %.8249, %.8248 + %.8250 =l add %.8247, 4 + %.8251 =w copy 786565377 + storew %.8251, %.8250 + %.8252 =l add %.8247, 8 + %.8253 =w copy 1 + storeh %.8253, %.8252 + %.8254 =l add %.8247, 10 + storeh 0, %.8254 + %.8255 =l add %.8247, 12 + %.8256 =w copy 3536423064 + storew %.8256, %.8255 + %.8257 =l add %.8247, 16 + %.8258 =w copy 1245577790 + storew %.8258, %.8257 + %.8259 =l loadl %.7825 + %.8260 =l loaduw %.8247 + storew %.8260, %.8259 + %.8261 =l add %.8247, 4 + %.8262 =l add %.8259, 4 + %.8263 =l loaduw %.8261 + storew %.8263, %.8262 + %.8264 =l add %.8261, 4 + %.8265 =l add %.8262, 4 + %.8266 =l loaduw %.8264 + storew %.8266, %.8265 + %.8267 =l add %.8264, 4 + %.8268 =l add %.8265, 4 + %.8269 =l loaduw %.8267 + storew %.8269, %.8268 + %.8270 =l add %.8267, 4 + %.8271 =l add %.8268, 4 + %.8272 =l loaduw %.8270 + storew %.8272, %.8271 + %.8273 =l add %.8270, 4 + %.8274 =l add %.8271, 4 +@if_join.1468 + %.8275 =w loadub %.7905 + %.8276 =w sub %.8275, 1 + storeb %.8276, %.7905 +@for_cont.1458 + %.8277 =l copy $g_185 + %.8278 =l mul 48, 1 + %.8279 =l add %.8277, %.8278 + %.8280 =l copy %.8279 + %.8281 =w loadsw %.8280 + %.8282 =w add %.8281, 1 + storew %.8282, %.8280 + jmp @for_cond.1456 +@for_join.1459 +@for_cont.1452 + %.8283 =l copy $g_265 + %.8284 =l mul 32, 1 + %.8285 =l add %.8283, %.8284 + %.8286 =l 
copy %.8285 + %.8287 =w loaduw %.8286 + %.8288 =w copy 1 + %.8289 =w add %.8287, %.8288 + storew %.8289, %.8286 + jmp @for_cond.1450 +@for_join.1453 +@for_cont.1448 + %.8290 =w loadsb %.2042 + %.8291 =w extsb %.8290 + %.8292 =w sub %.8291, 1 + %.8293 =w copy %.8292 + storeb %.8293, %.2042 + jmp @for_cond.1446 +@for_join.1449 + %.8294 =l loadl $g_173 + %.8295 =w loadsw %.8294 + %.8296 =l loadl %.2036 + storew %.8295, %.8296 +@if_join.1440 + %.8297 =l loadl $g_88 + %.8298 =l loadl %.8297 + %.8299 =l loadl %.8298 + ret %.8299 +} +function w $func_41(l %.1, w %.3) { +@start.1469 + %.2 =l alloc8 8 + storel %.1, %.2 + %.4 =l alloc4 1 + storeb %.3, %.4 + %.5 =l alloc8 72 + %.15 =l alloc4 4 +@body.1470 + %.6 =l add %.5, 0 + storel $g_50, %.6 + %.7 =l add %.5, 8 + storel $g_50, %.7 + %.8 =l add %.5, 16 + storel $g_50, %.8 + %.9 =l add %.5, 24 + storel $g_50, %.9 + %.10 =l add %.5, 32 + storel $g_50, %.10 + %.11 =l add %.5, 40 + storel $g_50, %.11 + %.12 =l add %.5, 48 + storel $g_50, %.12 + %.13 =l add %.5, 56 + storel $g_50, %.13 + %.14 =l add %.5, 64 + storel $g_50, %.14 + %.16 =w loadsw $g_50 + %.17 =l loadl $g_23 + %.18 =w loadsw %.17 + %.19 =w xor %.16, %.18 + storew %.19, $g_50 + %.20 =w loadub %.4 + %.21 =w extub %.20 + ret %.21 +} +function w $func_51(w %.1) { +@start.1471 + %.2 =l alloc4 4 + storew %.1, %.2 + %.3 =l alloc4 4 + %.6 =l alloc8 8 + %.8 =l alloc8 8 + %.10 =l alloc4 28 + %.25 =l alloc4 4 + %.28 =l alloc4 4 + %.31 =l alloc4 1 + %.34 =l alloc4 4 + %.37 =l alloc4 4 + %.40 =l alloc8 8 + %.42 =l alloc8 8 + %.44 =l alloc4 4 + %.60 =l alloc8 8 + %.62 =l alloc8 8 + %.64 =l alloc8 8 + %.66 =l alloc8 8 + %.68 =l alloc4 4 + %.73 =l alloc8 8 + %.75 =l alloc8 8 + %.77 =l alloc8 8 + %.79 =l alloc4 20 + %.90 =l alloc8 8 + %.92 =l alloc8 8 + %.94 =l alloc4 2 + %.97 =l alloc8 8 + %.102 =l alloc8 8 + %.106 =l alloc8 8 + %.108 =l alloc8 72 + %.109 =l alloc8 8 + %.111 =l alloc4 4 + %.220 =l alloc8 8 + %.222 =l alloc4 4 + %.225 =l alloc8 392 + %.275 =l alloc8 8 + %.279 =l 
alloc8 1176 + %.817 =l alloc4 4 + %.818 =l alloc4 4 + %.819 =l alloc4 4 +@body.1472 + %.4 =l add %.3, 0 + %.5 =w copy 1159863208 + storew %.5, %.4 + %.7 =l add %.6, 0 + storel $g_46, %.7 + %.9 =l add %.8, 0 + storel $g_57, %.9 + %.11 =l add %.10, 0 + %.12 =w copy 4248860344 + storew %.12, %.11 + %.13 =l add %.10, 4 + %.14 =w copy 4248860344 + storew %.14, %.13 + %.15 =l add %.10, 8 + %.16 =w copy 4248860344 + storew %.16, %.15 + %.17 =l add %.10, 12 + %.18 =w copy 4248860344 + storew %.18, %.17 + %.19 =l add %.10, 16 + %.20 =w copy 4248860344 + storew %.20, %.19 + %.21 =l add %.10, 20 + %.22 =w copy 4248860344 + storew %.22, %.21 + %.23 =l add %.10, 24 + %.24 =w copy 4248860344 + storew %.24, %.23 + %.26 =l add %.25, 0 + %.27 =w copy 1082973296 + storew %.27, %.26 + %.29 =l add %.28, 0 + %.30 =w copy 3433997516 + storew %.30, %.29 + %.32 =l add %.31, 0 + %.33 =w copy 250 + storeb %.33, %.32 + %.35 =l add %.34, 0 + %.36 =w copy 1749978495 + storew %.36, %.35 + %.38 =l add %.37, 0 + %.39 =w copy 3489158437 + storew %.39, %.38 + %.41 =l add %.40, 0 + storel $g_84, %.41 + %.43 =l add %.42, 0 + storel %.34, %.43 + %.45 =w loadsw %.3 + %.46 =w loadsw %.3 + %.47 =w loadsw %.3 + %.48 =w cnew %.47, 0 + jnz %.48, @logic_right.1473, @logic_join.1474 +@logic_right.1473 + %.49 =w cnel 306100898, 0 +@logic_join.1474 + %.50 =w phi @body.1472 %.48, @logic_right.1473 %.49 + %.51 =w xor %.46, %.50 + %.52 =w copy %.51 + %.53 =l loadl %.6 + storeb %.52, %.53 + %.54 =l loadl %.8 + storeb %.52, %.54 + %.55 =w extub %.52 + %.56 =w ceqw %.45, %.55 + %.57 =l extsw %.56 + storel %.57, $g_58 + storew 0, $g_50 +@for_cond.1475 + %.58 =w loadsw $g_50 + %.59 =w cslew %.58, 6 + jnz %.59, @for_body.1476, @for_join.1478 +@for_body.1476 + %.61 =l add %.60, 0 + storel %.3, %.61 + %.63 =l add %.62, 0 + storel %.3, %.63 + %.65 =l add %.64, 0 + storel %.3, %.65 + %.67 =l add %.66, 0 + storel %.3, %.67 + %.69 =l add %.68, 0 + %.70 =l extsw 0 + %.71 =l sub %.70, 1 + %.72 =w copy %.71 + storew %.72, %.69 + 
%.74 =l add %.73, 0 + storel %.68, %.74 + %.76 =l add %.75, 0 + storel %.68, %.76 + %.78 =l add %.77, 0 + storel %.68, %.78 + %.80 =l add %.79, 0 + %.81 =w copy 595471528 + storew %.81, %.80 + %.82 =l add %.79, 4 + %.83 =w copy 595471528 + storew %.83, %.82 + %.84 =l add %.79, 8 + %.85 =w copy 595471528 + storew %.85, %.84 + %.86 =l add %.79, 12 + %.87 =w copy 595471528 + storew %.87, %.86 + %.88 =l add %.79, 16 + %.89 =w copy 595471528 + storew %.89, %.88 + %.91 =l add %.90, 0 + storel %.68, %.91 + %.93 =l add %.92, 0 + storel %.3, %.93 + %.95 =l add %.94, 0 + %.96 =w copy 58364 + storeh %.96, %.95 + %.98 =l add %.97, 0 + %.99 =l extsw 1 + %.100 =l mul %.99, 4 + %.101 =l add %.79, %.100 + storel %.101, %.98 + %.103 =l add %.102, 0 + %.104 =l extsw 0 + %.105 =l copy %.104 + storel %.105, %.103 + %.107 =l add %.106, 0 + storel %.68, %.107 + %.110 =l add %.109, 0 + storel $g_38, %.110 + storew 0, %.111 +@for_cond.1479 + %.112 =w loadsw %.111 + %.113 =w csltw %.112, 9 + jnz %.113, @for_body.1480, @for_join.1482 +@for_body.1480 + %.114 =w loadsw %.111 + %.115 =l extsw %.114 + %.116 =l mul %.115, 8 + %.117 =l add %.108, %.116 + storel %.68, %.117 +@for_cont.1481 + %.118 =w loadsw %.111 + %.119 =w add %.118, 1 + storew %.119, %.111 + jmp @for_cond.1479 +@for_join.1482 + %.120 =w loaduw $g_84 + %.121 =w sub %.120, 1 + storew %.121, $g_84 + %.122 =l loadl %.77 + %.123 =w loadsw %.122 + %.124 =w copy %.123 + %.125 =w loadsw $g_50 + %.126 =l extsw %.125 + %.127 =l mul %.126, 4 + %.128 =l add %.10, %.127 + %.129 =w loaduw %.128 + %.130 =w and %.124, %.129 + %.131 =w copy %.130 + storew %.131, %.122 + %.132 =l extsw 4 + storel %.132, $g_80 +@for_cond.1483 + %.133 =l loadl $g_80 + %.134 =l extsw 1 + %.135 =w csgel %.133, %.134 + jnz %.135, @for_body.1484, @for_join.1486 +@for_body.1484 + %.136 =l loadl %.109 + storel %.136, $g_88 +@for_cont.1485 + %.137 =l loadl $g_80 + %.138 =l extsw 1 + %.139 =l sub %.137, %.138 + storel %.139, $g_80 + jmp @for_cond.1483 +@for_join.1486 + 
storew 2, %.25 +@for_cond.1487 + %.140 =w loadsw %.25 + %.141 =w cslew %.140, 6 + jnz %.141, @for_body.1488, @for_join.1490 +@for_body.1488 + %.142 =l loadl $g_23 + %.143 =w loadsw %.142 + %.144 =w cnew %.143, 0 + jnz %.144, @if_true.1491, @if_false.1492 +@if_true.1491 + jmp @for_join.1490 +@if_false.1492 + %.145 =l loadl $g_23 + %.146 =w loadsw %.145 + %.147 =w cnew %.146, 0 + jnz %.147, @if_true.1493, @if_false.1494 +@if_true.1493 + jmp @for_cont.1489 +@if_false.1494 + %.148 =l loadl $g_23 + %.149 =w loadsw %.148 + %.150 =w cnew %.149, 0 + jnz %.150, @if_true.1495, @if_false.1496 +@if_true.1495 + jmp @for_join.1490 +@if_false.1496 +@for_cont.1489 + %.151 =w loadsw %.25 + %.152 =w add %.151, 1 + storew %.152, %.25 + jmp @for_cond.1487 +@for_join.1490 +@for_cont.1477 + %.153 =w loadsw $g_50 + %.154 =w add %.153, 1 + storew %.154, $g_50 + jmp @for_cond.1475 +@for_join.1478 + %.155 =l loadl %.42 + %.156 =w loadsw %.155 + %.157 =w copy 0 + %.158 =w call $safe_lshift_func_int8_t_s_s(w %.157, w 5) + %.159 =w copy 28 + %.160 =w loadsw $g_50 + %.161 =l loadl $g_80 + %.162 =w copy %.161 + %.163 =w loadsb %.31 + %.164 =w extsb %.163 + %.165 =w call $safe_mul_func_uint16_t_u_u(w %.162, w %.164) + %.166 =w extuh %.165 + %.167 =w csltw %.160, %.166 + %.168 =w loadsw %.2 + %.169 =w copy %.168 + %.170 =w loadsw %.37 + %.171 =l loadl $g_58 + %.172 =l loadl %.40 + %.173 =w loaduw %.172 + %.174 =w loadsh $g_81 + %.175 =w extsh %.174 + %.176 =w and %.173, %.175 + storew %.176, %.172 + %.177 =w copy 46 + %.178 =w loadsw %.2 + %.179 =w copy %.178 + %.180 =w call $safe_mul_func_uint8_t_u_u(w %.177, w %.179) + %.181 =w loadsw %.25 + %.182 =l extsw 0 + %.183 =w cnel %.25, %.182 + %.184 =w loadsw %.2 + %.185 =w copy %.184 + %.186 =w copy 251 + %.187 =w call $safe_mul_func_uint8_t_u_u(w %.185, w %.186) + %.188 =w loadsw %.2 + %.189 =w copy %.188 + %.190 =w call $safe_mod_func_uint8_t_u_u(w %.187, w %.189) + %.191 =l extub %.190 + %.192 =l and %.171, %.191 + storel %.192, $g_58 + %.193 =w 
loadsb %.31 + %.194 =l extsb %.193 + %.195 =w cugtl %.192, %.194 + %.196 =w ceqw %.195, 0 + %.197 =l extsw %.196 + %.198 =w csgel %.197, 7466234982433381138 + %.199 =w or %.170, %.198 + storew %.199, %.37 + %.200 =w loadsw $g_24 + %.201 =w call $safe_mod_func_int32_t_s_s(w %.199, w %.200) + %.202 =w copy %.201 + %.203 =w call $safe_mul_func_uint8_t_u_u(w %.169, w %.202) + %.204 =w extub %.203 + %.205 =w call $safe_lshift_func_uint16_t_u_s(w %.204, w 3) + %.206 =w extuh %.205 + %.207 =w csgew %.167, %.206 + %.208 =w copy %.207 + %.209 =w loadsw %.2 + %.210 =w call $safe_rshift_func_int16_t_s_s(w %.208, w %.209) + %.211 =w copy %.210 + %.212 =w call $safe_div_func_int8_t_s_s(w %.159, w %.211) + %.213 =w extsb %.158 + %.214 =w extsb %.212 + %.215 =w csgtw %.213, %.214 + %.216 =w and %.156, %.215 + storew %.216, %.155 + %.217 =w cnew %.216, 0 + jnz %.217, @if_true.1497, @if_false.1498 +@if_true.1497 + %.218 =w loadsw %.2 + %.219 =w copy %.218 + ret %.219 +@if_false.1498 + %.221 =l add %.220, 0 + storel %.37, %.221 + %.223 =l add %.222, 0 + %.224 =w copy 4261573277 + storew %.224, %.223 + %.226 =l add %.225, 0 + storel %.28, %.226 + %.227 =l add %.225, 8 + storel %.28, %.227 + %.228 =l add %.225, 16 + storel %.3, %.228 + %.229 =l add %.225, 24 + storel %.28, %.229 + %.230 =l add %.225, 32 + storel %.28, %.230 + %.231 =l add %.225, 40 + storel %.3, %.231 + %.232 =l add %.225, 48 + storel %.28, %.232 + %.233 =l add %.225, 56 + storel %.28, %.233 + %.234 =l add %.225, 64 + storel %.34, %.234 + %.235 =l add %.225, 72 + storel %.34, %.235 + %.236 =l add %.225, 80 + storel %.28, %.236 + %.237 =l add %.225, 88 + storel %.34, %.237 + %.238 =l add %.225, 96 + storel %.34, %.238 + %.239 =l add %.225, 104 + storel %.28, %.239 + %.240 =l add %.225, 112 + storel %.34, %.240 + %.241 =l add %.225, 120 + storel %.28, %.241 + %.242 =l add %.225, 128 + storel %.34, %.242 + %.243 =l add %.225, 136 + storel %.34, %.243 + %.244 =l add %.225, 144 + storel %.28, %.244 + %.245 =l add %.225, 
152 + storel %.34, %.245 + %.246 =l add %.225, 160 + storel %.34, %.246 + %.247 =l add %.225, 168 + storel %.28, %.247 + %.248 =l add %.225, 176 + storel %.28, %.248 + %.249 =l add %.225, 184 + storel %.3, %.249 + %.250 =l add %.225, 192 + storel %.28, %.250 + %.251 =l add %.225, 200 + storel %.28, %.251 + %.252 =l add %.225, 208 + storel %.3, %.252 + %.253 =l add %.225, 216 + storel %.28, %.253 + %.254 =l add %.225, 224 + storel %.28, %.254 + %.255 =l add %.225, 232 + storel %.34, %.255 + %.256 =l add %.225, 240 + storel %.34, %.256 + %.257 =l add %.225, 248 + storel %.28, %.257 + %.258 =l add %.225, 256 + storel %.34, %.258 + %.259 =l add %.225, 264 + storel %.34, %.259 + %.260 =l add %.225, 272 + storel %.28, %.260 + %.261 =l add %.225, 280 + storel %.34, %.261 + %.262 =l add %.225, 288 + storel %.28, %.262 + %.263 =l add %.225, 296 + storel %.34, %.263 + %.264 =l add %.225, 304 + storel %.34, %.264 + %.265 =l add %.225, 312 + storel %.28, %.265 + %.266 =l add %.225, 320 + storel %.34, %.266 + %.267 =l add %.225, 328 + storel %.34, %.267 + %.268 =l add %.225, 336 + storel %.28, %.268 + %.269 =l add %.225, 344 + storel %.34, %.269 + %.270 =l add %.225, 352 + storel %.28, %.270 + %.271 =l add %.225, 360 + storel %.34, %.271 + %.272 =l add %.225, 368 + storel %.34, %.272 + %.273 =l add %.225, 376 + storel %.28, %.273 + %.274 =l add %.225, 384 + storel %.34, %.274 + %.276 =l add %.275, 0 + %.277 =l extsw 0 + %.278 =l copy %.277 + storel %.278, %.276 + %.280 =l add %.279, 0 + storel %.42, %.280 + %.281 =l add %.279, 8 + %.282 =l extsw 0 + %.283 =l mul %.282, 56 + %.284 =l add %.225, %.283 + %.285 =l extsw 1 + %.286 =l mul %.285, 8 + %.287 =l add %.284, %.286 + storel %.287, %.281 + %.288 =l add %.279, 16 + storel %.220, %.288 + %.289 =l add %.279, 24 + %.290 =l extsw 3 + %.291 =l mul %.290, 56 + %.292 =l add %.225, %.291 + %.293 =l extsw 0 + %.294 =l mul %.293, 8 + %.295 =l add %.292, %.294 + storel %.295, %.289 + %.296 =l add %.279, 32 + %.297 =l extsw 0 + %.298 =l 
mul %.297, 56 + %.299 =l add %.225, %.298 + %.300 =l extsw 1 + %.301 =l mul %.300, 8 + %.302 =l add %.299, %.301 + storel %.302, %.296 + %.303 =l add %.279, 40 + %.304 =l extsw 0 + %.305 =l mul %.304, 56 + %.306 =l add %.225, %.305 + %.307 =l extsw 1 + %.308 =l mul %.307, 8 + %.309 =l add %.306, %.308 + storel %.309, %.303 + %.310 =l add %.279, 48 + %.311 =l extsw 3 + %.312 =l mul %.311, 56 + %.313 =l add %.225, %.312 + %.314 =l extsw 0 + %.315 =l mul %.314, 8 + %.316 =l add %.313, %.315 + storel %.316, %.310 + %.317 =l add %.279, 56 + %.318 =l extsw 0 + %.319 =l mul %.318, 56 + %.320 =l add %.225, %.319 + %.321 =l extsw 1 + %.322 =l mul %.321, 8 + %.323 =l add %.320, %.322 + storel %.323, %.317 + %.324 =l add %.279, 64 + %.325 =l extsw 0 + %.326 =l mul %.325, 56 + %.327 =l add %.225, %.326 + %.328 =l extsw 1 + %.329 =l mul %.328, 8 + %.330 =l add %.327, %.329 + storel %.330, %.324 + %.331 =l add %.279, 72 + %.332 =l extsw 0 + %.333 =l mul %.332, 56 + %.334 =l add %.225, %.333 + %.335 =l extsw 1 + %.336 =l mul %.335, 8 + %.337 =l add %.334, %.336 + storel %.337, %.331 + %.338 =l add %.279, 80 + %.339 =l extsw 0 + %.340 =l mul %.339, 56 + %.341 =l add %.225, %.340 + %.342 =l extsw 1 + %.343 =l mul %.342, 8 + %.344 =l add %.341, %.343 + storel %.344, %.338 + %.345 =l add %.279, 88 + storel %.220, %.345 + %.346 =l add %.279, 96 + %.347 =l extsw 0 + %.348 =l mul %.347, 56 + %.349 =l add %.225, %.348 + %.350 =l extsw 1 + %.351 =l mul %.350, 8 + %.352 =l add %.349, %.351 + storel %.352, %.346 + %.353 =l add %.279, 104 + %.354 =l extsw 0 + %.355 =l mul %.354, 56 + %.356 =l add %.225, %.355 + %.357 =l extsw 1 + %.358 =l mul %.357, 8 + %.359 =l add %.356, %.358 + storel %.359, %.353 + %.360 =l add %.279, 112 + storel %.42, %.360 + %.361 =l add %.279, 120 + %.362 =l extsw 3 + %.363 =l mul %.362, 56 + %.364 =l add %.225, %.363 + %.365 =l extsw 0 + %.366 =l mul %.365, 8 + %.367 =l add %.364, %.366 + storel %.367, %.361 + %.368 =l add %.279, 128 + storel %.220, %.368 + %.369 =l 
add %.279, 136 + storel %.42, %.369 + %.370 =l add %.279, 144 + storel %.42, %.370 + %.371 =l add %.279, 152 + storel %.220, %.371 + %.372 =l add %.279, 160 + %.373 =l extsw 3 + %.374 =l mul %.373, 56 + %.375 =l add %.225, %.374 + %.376 =l extsw 0 + %.377 =l mul %.376, 8 + %.378 =l add %.375, %.377 + storel %.378, %.372 + %.379 =l add %.279, 168 + storel %.42, %.379 + %.380 =l add %.279, 176 + %.381 =l extsw 0 + %.382 =l mul %.381, 56 + %.383 =l add %.225, %.382 + %.384 =l extsw 1 + %.385 =l mul %.384, 8 + %.386 =l add %.383, %.385 + storel %.386, %.380 + %.387 =l add %.279, 184 + storel %.220, %.387 + %.388 =l add %.279, 192 + %.389 =l extsw 0 + %.390 =l mul %.389, 56 + %.391 =l add %.225, %.390 + %.392 =l extsw 1 + %.393 =l mul %.392, 8 + %.394 =l add %.391, %.393 + storel %.394, %.388 + %.395 =l add %.279, 200 + storel %.220, %.395 + %.396 =l add %.279, 208 + %.397 =l extsw 0 + %.398 =l mul %.397, 56 + %.399 =l add %.225, %.398 + %.400 =l extsw 1 + %.401 =l mul %.400, 8 + %.402 =l add %.399, %.401 + storel %.402, %.396 + %.403 =l add %.279, 216 + storel %.42, %.403 + %.404 =l add %.279, 224 + %.405 =l extsw 0 + %.406 =l mul %.405, 56 + %.407 =l add %.225, %.406 + %.408 =l extsw 1 + %.409 =l mul %.408, 8 + %.410 =l add %.407, %.409 + storel %.410, %.404 + %.411 =l add %.279, 232 + storel %.42, %.411 + %.412 =l add %.279, 240 + storel %.220, %.412 + %.413 =l add %.279, 248 + storel %.220, %.413 + %.414 =l add %.279, 256 + storel %.42, %.414 + %.415 =l add %.279, 264 + %.416 =l extsw 0 + %.417 =l mul %.416, 56 + %.418 =l add %.225, %.417 + %.419 =l extsw 1 + %.420 =l mul %.419, 8 + %.421 =l add %.418, %.420 + storel %.421, %.415 + %.422 =l add %.279, 272 + storel %.220, %.422 + %.423 =l add %.279, 280 + storel %.220, %.423 + %.424 =l add %.279, 288 + %.425 =l extsw 0 + %.426 =l mul %.425, 56 + %.427 =l add %.225, %.426 + %.428 =l extsw 1 + %.429 =l mul %.428, 8 + %.430 =l add %.427, %.429 + storel %.430, %.424 + %.431 =l add %.279, 296 + storel %.220, %.431 + %.432 
=l add %.279, 304 + %.433 =l extsw 0 + %.434 =l mul %.433, 56 + %.435 =l add %.225, %.434 + %.436 =l extsw 1 + %.437 =l mul %.436, 8 + %.438 =l add %.435, %.437 + storel %.438, %.432 + %.439 =l add %.279, 312 + storel %.220, %.439 + %.440 =l add %.279, 320 + storel %.220, %.440 + %.441 =l add %.279, 328 + storel %.220, %.441 + %.442 =l add %.279, 336 + storel %.42, %.442 + %.443 =l add %.279, 344 + storel %.220, %.443 + %.444 =l add %.279, 352 + storel %.220, %.444 + %.445 =l add %.279, 360 + storel %.42, %.445 + %.446 =l add %.279, 368 + %.447 =l extsw 0 + %.448 =l mul %.447, 56 + %.449 =l add %.225, %.448 + %.450 =l extsw 1 + %.451 =l mul %.450, 8 + %.452 =l add %.449, %.451 + storel %.452, %.446 + %.453 =l add %.279, 376 + storel %.220, %.453 + %.454 =l add %.279, 384 + %.455 =l extsw 3 + %.456 =l mul %.455, 56 + %.457 =l add %.225, %.456 + %.458 =l extsw 0 + %.459 =l mul %.458, 8 + %.460 =l add %.457, %.459 + storel %.460, %.454 + %.461 =l add %.279, 392 + storel %.220, %.461 + %.462 =l add %.279, 400 + %.463 =l extsw 0 + %.464 =l mul %.463, 56 + %.465 =l add %.225, %.464 + %.466 =l extsw 1 + %.467 =l mul %.466, 8 + %.468 =l add %.465, %.467 + storel %.468, %.462 + %.469 =l add %.279, 408 + storel %.220, %.469 + %.470 =l add %.279, 416 + %.471 =l extsw 0 + %.472 =l mul %.471, 56 + %.473 =l add %.225, %.472 + %.474 =l extsw 1 + %.475 =l mul %.474, 8 + %.476 =l add %.473, %.475 + storel %.476, %.470 + %.477 =l add %.279, 424 + storel %.42, %.477 + %.478 =l add %.279, 432 + %.479 =l extsw 0 + %.480 =l mul %.479, 56 + %.481 =l add %.225, %.480 + %.482 =l extsw 1 + %.483 =l mul %.482, 8 + %.484 =l add %.481, %.483 + storel %.484, %.478 + %.485 =l add %.279, 440 + storel %.220, %.485 + %.486 =l add %.279, 448 + storel %.42, %.486 + %.487 =l add %.279, 456 + storel %.42, %.487 + %.488 =l add %.279, 464 + storel %.220, %.488 + %.489 =l add %.279, 472 + %.490 =l extsw 3 + %.491 =l mul %.490, 56 + %.492 =l add %.225, %.491 + %.493 =l extsw 0 + %.494 =l mul %.493, 8 + 
%.495 =l add %.492, %.494 + storel %.495, %.489 + %.496 =l add %.279, 480 + storel %.42, %.496 + %.497 =l add %.279, 488 + storel %.220, %.497 + %.498 =l add %.279, 496 + %.499 =l extsw 3 + %.500 =l mul %.499, 56 + %.501 =l add %.225, %.500 + %.502 =l extsw 0 + %.503 =l mul %.502, 8 + %.504 =l add %.501, %.503 + storel %.504, %.498 + %.505 =l add %.279, 504 + storel %.220, %.505 + %.506 =l add %.279, 512 + %.507 =l extsw 0 + %.508 =l mul %.507, 56 + %.509 =l add %.225, %.508 + %.510 =l extsw 1 + %.511 =l mul %.510, 8 + %.512 =l add %.509, %.511 + storel %.512, %.506 + %.513 =l add %.279, 520 + %.514 =l extsw 0 + %.515 =l mul %.514, 56 + %.516 =l add %.225, %.515 + %.517 =l extsw 1 + %.518 =l mul %.517, 8 + %.519 =l add %.516, %.518 + storel %.519, %.513 + %.520 =l add %.279, 528 + %.521 =l extsw 0 + %.522 =l mul %.521, 56 + %.523 =l add %.225, %.522 + %.524 =l extsw 1 + %.525 =l mul %.524, 8 + %.526 =l add %.523, %.525 + storel %.526, %.520 + %.527 =l add %.279, 536 + %.528 =l extsw 0 + %.529 =l mul %.528, 56 + %.530 =l add %.225, %.529 + %.531 =l extsw 1 + %.532 =l mul %.531, 8 + %.533 =l add %.530, %.532 + storel %.533, %.527 + %.534 =l add %.279, 544 + %.535 =l extsw 0 + %.536 =l mul %.535, 56 + %.537 =l add %.225, %.536 + %.538 =l extsw 1 + %.539 =l mul %.538, 8 + %.540 =l add %.537, %.539 + storel %.540, %.534 + %.541 =l add %.279, 552 + storel %.220, %.541 + %.542 =l add %.279, 560 + %.543 =l extsw 0 + %.544 =l mul %.543, 56 + %.545 =l add %.225, %.544 + %.546 =l extsw 1 + %.547 =l mul %.546, 8 + %.548 =l add %.545, %.547 + storel %.548, %.542 + %.549 =l add %.279, 568 + %.550 =l extsw 3 + %.551 =l mul %.550, 56 + %.552 =l add %.225, %.551 + %.553 =l extsw 0 + %.554 =l mul %.553, 8 + %.555 =l add %.552, %.554 + storel %.555, %.549 + %.556 =l add %.279, 576 + storel %.220, %.556 + %.557 =l add %.279, 584 + %.558 =l extsw 0 + %.559 =l mul %.558, 56 + %.560 =l add %.225, %.559 + %.561 =l extsw 1 + %.562 =l mul %.561, 8 + %.563 =l add %.560, %.562 + storel %.563, 
%.557 + %.564 =l add %.279, 592 + storel %.42, %.564 + %.565 =l add %.279, 600 + storel %.220, %.565 + %.566 =l add %.279, 608 + storel %.220, %.566 + %.567 =l add %.279, 616 + storel %.42, %.567 + %.568 =l add %.279, 624 + %.569 =l extsw 0 + %.570 =l mul %.569, 56 + %.571 =l add %.225, %.570 + %.572 =l extsw 1 + %.573 =l mul %.572, 8 + %.574 =l add %.571, %.573 + storel %.574, %.568 + %.575 =l add %.279, 632 + %.576 =l extsw 0 + %.577 =l mul %.576, 56 + %.578 =l add %.225, %.577 + %.579 =l extsw 1 + %.580 =l mul %.579, 8 + %.581 =l add %.578, %.580 + storel %.581, %.575 + %.582 =l add %.279, 640 + %.583 =l extsw 0 + %.584 =l mul %.583, 56 + %.585 =l add %.225, %.584 + %.586 =l extsw 1 + %.587 =l mul %.586, 8 + %.588 =l add %.585, %.587 + storel %.588, %.582 + %.589 =l add %.279, 648 + storel %.42, %.589 + %.590 =l add %.279, 656 + storel %.220, %.590 + %.591 =l add %.279, 664 + storel %.42, %.591 + %.592 =l add %.279, 672 + storel %.42, %.592 + %.593 =l add %.279, 680 + %.594 =l extsw 0 + %.595 =l mul %.594, 56 + %.596 =l add %.225, %.595 + %.597 =l extsw 1 + %.598 =l mul %.597, 8 + %.599 =l add %.596, %.598 + storel %.599, %.593 + %.600 =l add %.279, 688 + storel %.220, %.600 + %.601 =l add %.279, 696 + %.602 =l extsw 3 + %.603 =l mul %.602, 56 + %.604 =l add %.225, %.603 + %.605 =l extsw 0 + %.606 =l mul %.605, 8 + %.607 =l add %.604, %.606 + storel %.607, %.601 + %.608 =l add %.279, 704 + %.609 =l extsw 0 + %.610 =l mul %.609, 56 + %.611 =l add %.225, %.610 + %.612 =l extsw 1 + %.613 =l mul %.612, 8 + %.614 =l add %.611, %.613 + storel %.614, %.608 + %.615 =l add %.279, 712 + %.616 =l extsw 0 + %.617 =l mul %.616, 56 + %.618 =l add %.225, %.617 + %.619 =l extsw 1 + %.620 =l mul %.619, 8 + %.621 =l add %.618, %.620 + storel %.621, %.615 + %.622 =l add %.279, 720 + %.623 =l extsw 3 + %.624 =l mul %.623, 56 + %.625 =l add %.225, %.624 + %.626 =l extsw 0 + %.627 =l mul %.626, 8 + %.628 =l add %.625, %.627 + storel %.628, %.622 + %.629 =l add %.279, 728 + %.630 =l 
extsw 0 + %.631 =l mul %.630, 56 + %.632 =l add %.225, %.631 + %.633 =l extsw 1 + %.634 =l mul %.633, 8 + %.635 =l add %.632, %.634 + storel %.635, %.629 + %.636 =l add %.279, 736 + %.637 =l extsw 0 + %.638 =l mul %.637, 56 + %.639 =l add %.225, %.638 + %.640 =l extsw 1 + %.641 =l mul %.640, 8 + %.642 =l add %.639, %.641 + storel %.642, %.636 + %.643 =l add %.279, 744 + %.644 =l extsw 0 + %.645 =l mul %.644, 56 + %.646 =l add %.225, %.645 + %.647 =l extsw 1 + %.648 =l mul %.647, 8 + %.649 =l add %.646, %.648 + storel %.649, %.643 + %.650 =l add %.279, 752 + %.651 =l extsw 0 + %.652 =l mul %.651, 56 + %.653 =l add %.225, %.652 + %.654 =l extsw 1 + %.655 =l mul %.654, 8 + %.656 =l add %.653, %.655 + storel %.656, %.650 + %.657 =l add %.279, 760 + storel %.220, %.657 + %.658 =l add %.279, 768 + %.659 =l extsw 0 + %.660 =l mul %.659, 56 + %.661 =l add %.225, %.660 + %.662 =l extsw 1 + %.663 =l mul %.662, 8 + %.664 =l add %.661, %.663 + storel %.664, %.658 + %.665 =l add %.279, 776 + %.666 =l extsw 0 + %.667 =l mul %.666, 56 + %.668 =l add %.225, %.667 + %.669 =l extsw 1 + %.670 =l mul %.669, 8 + %.671 =l add %.668, %.670 + storel %.671, %.665 + %.672 =l add %.279, 784 + storel %.42, %.672 + %.673 =l add %.279, 792 + %.674 =l extsw 3 + %.675 =l mul %.674, 56 + %.676 =l add %.225, %.675 + %.677 =l extsw 0 + %.678 =l mul %.677, 8 + %.679 =l add %.676, %.678 + storel %.679, %.673 + %.680 =l add %.279, 800 + storel %.220, %.680 + %.681 =l add %.279, 808 + storel %.42, %.681 + %.682 =l add %.279, 816 + storel %.42, %.682 + %.683 =l add %.279, 824 + storel %.220, %.683 + %.684 =l add %.279, 832 + %.685 =l extsw 3 + %.686 =l mul %.685, 56 + %.687 =l add %.225, %.686 + %.688 =l extsw 0 + %.689 =l mul %.688, 8 + %.690 =l add %.687, %.689 + storel %.690, %.684 + %.691 =l add %.279, 840 + storel %.42, %.691 + %.692 =l add %.279, 848 + %.693 =l extsw 0 + %.694 =l mul %.693, 56 + %.695 =l add %.225, %.694 + %.696 =l extsw 1 + %.697 =l mul %.696, 8 + %.698 =l add %.695, %.697 + 
storel %.698, %.692 + %.699 =l add %.279, 856 + storel %.220, %.699 + %.700 =l add %.279, 864 + %.701 =l extsw 0 + %.702 =l mul %.701, 56 + %.703 =l add %.225, %.702 + %.704 =l extsw 1 + %.705 =l mul %.704, 8 + %.706 =l add %.703, %.705 + storel %.706, %.700 + %.707 =l add %.279, 872 + storel %.220, %.707 + %.708 =l add %.279, 880 + %.709 =l extsw 0 + %.710 =l mul %.709, 56 + %.711 =l add %.225, %.710 + %.712 =l extsw 1 + %.713 =l mul %.712, 8 + %.714 =l add %.711, %.713 + storel %.714, %.708 + %.715 =l add %.279, 888 + storel %.42, %.715 + %.716 =l add %.279, 896 + %.717 =l extsw 0 + %.718 =l mul %.717, 56 + %.719 =l add %.225, %.718 + %.720 =l extsw 1 + %.721 =l mul %.720, 8 + %.722 =l add %.719, %.721 + storel %.722, %.716 + %.723 =l add %.279, 904 + storel %.42, %.723 + %.724 =l add %.279, 912 + storel %.220, %.724 + %.725 =l add %.279, 920 + storel %.220, %.725 + %.726 =l add %.279, 928 + storel %.42, %.726 + %.727 =l add %.279, 936 + %.728 =l extsw 0 + %.729 =l mul %.728, 56 + %.730 =l add %.225, %.729 + %.731 =l extsw 1 + %.732 =l mul %.731, 8 + %.733 =l add %.730, %.732 + storel %.733, %.727 + %.734 =l add %.279, 944 + storel %.220, %.734 + %.735 =l add %.279, 952 + storel %.220, %.735 + %.736 =l add %.279, 960 + %.737 =l extsw 0 + %.738 =l mul %.737, 56 + %.739 =l add %.225, %.738 + %.740 =l extsw 1 + %.741 =l mul %.740, 8 + %.742 =l add %.739, %.741 + storel %.742, %.736 + %.743 =l add %.279, 968 + storel %.220, %.743 + %.744 =l add %.279, 976 + %.745 =l extsw 0 + %.746 =l mul %.745, 56 + %.747 =l add %.225, %.746 + %.748 =l extsw 1 + %.749 =l mul %.748, 8 + %.750 =l add %.747, %.749 + storel %.750, %.744 + %.751 =l add %.279, 984 + storel %.220, %.751 + %.752 =l add %.279, 992 + storel %.220, %.752 + %.753 =l add %.279, 1000 + storel %.220, %.753 + %.754 =l add %.279, 1008 + storel %.42, %.754 + %.755 =l add %.279, 1016 + storel %.220, %.755 + %.756 =l add %.279, 1024 + storel %.220, %.756 + %.757 =l add %.279, 1032 + storel %.42, %.757 + %.758 =l add 
%.279, 1040 + %.759 =l extsw 0 + %.760 =l mul %.759, 56 + %.761 =l add %.225, %.760 + %.762 =l extsw 1 + %.763 =l mul %.762, 8 + %.764 =l add %.761, %.763 + storel %.764, %.758 + %.765 =l add %.279, 1048 + storel %.220, %.765 + %.766 =l add %.279, 1056 + %.767 =l extsw 3 + %.768 =l mul %.767, 56 + %.769 =l add %.225, %.768 + %.770 =l extsw 0 + %.771 =l mul %.770, 8 + %.772 =l add %.769, %.771 + storel %.772, %.766 + %.773 =l add %.279, 1064 + storel %.220, %.773 + %.774 =l add %.279, 1072 + %.775 =l extsw 0 + %.776 =l mul %.775, 56 + %.777 =l add %.225, %.776 + %.778 =l extsw 1 + %.779 =l mul %.778, 8 + %.780 =l add %.777, %.779 + storel %.780, %.774 + %.781 =l add %.279, 1080 + storel %.220, %.781 + %.782 =l add %.279, 1088 + %.783 =l extsw 0 + %.784 =l mul %.783, 56 + %.785 =l add %.225, %.784 + %.786 =l extsw 1 + %.787 =l mul %.786, 8 + %.788 =l add %.785, %.787 + storel %.788, %.782 + %.789 =l add %.279, 1096 + storel %.42, %.789 + %.790 =l add %.279, 1104 + %.791 =l extsw 0 + %.792 =l mul %.791, 56 + %.793 =l add %.225, %.792 + %.794 =l extsw 1 + %.795 =l mul %.794, 8 + %.796 =l add %.793, %.795 + storel %.796, %.790 + %.797 =l add %.279, 1112 + storel %.220, %.797 + %.798 =l add %.279, 1120 + storel %.42, %.798 + %.799 =l add %.279, 1128 + storel %.42, %.799 + %.800 =l add %.279, 1136 + storel %.220, %.800 + %.801 =l add %.279, 1144 + %.802 =l extsw 3 + %.803 =l mul %.802, 56 + %.804 =l add %.225, %.803 + %.805 =l extsw 0 + %.806 =l mul %.805, 8 + %.807 =l add %.804, %.806 + storel %.807, %.801 + %.808 =l add %.279, 1152 + storel %.42, %.808 + %.809 =l add %.279, 1160 + storel %.220, %.809 + %.810 =l add %.279, 1168 + %.811 =l extsw 3 + %.812 =l mul %.811, 56 + %.813 =l add %.225, %.812 + %.814 =l extsw 0 + %.815 =l mul %.814, 8 + %.816 =l add %.813, %.815 + storel %.816, %.810 + %.820 =w loaduw $g_115 + %.821 =w add %.820, 1 + storew %.821, $g_115 + %.822 =l loadl $g_38 + %.823 =l loadl %.822 + storel %.823, %.42 +@if_join.1499 + %.824 =w loaduw $g_115 + 
%.825 =w copy %.824 + ret %.825 +} +data $.Lstring.109 = align 1 { b "1", z 1, } +data $.Lstring.110 = align 1 { b "g_2", z 1, } +data $.Lstring.111 = align 1 { b "g_13[i][j][k]", z 1, } +data $.Lstring.112 = align 1 { b "index = [%d][%d][%d]\012", z 1, } +data $.Lstring.113 = align 1 { b "g_24", z 1, } +data $.Lstring.114 = align 1 { b "g_46", z 1, } +data $.Lstring.115 = align 1 { b "g_50", z 1, } +data $.Lstring.116 = align 1 { b "g_57", z 1, } +data $.Lstring.117 = align 1 { b "g_58", z 1, } +data $.Lstring.118 = align 1 { b "g_80", z 1, } +data $.Lstring.119 = align 1 { b "g_81", z 1, } +data $.Lstring.120 = align 1 { b "g_82", z 1, } +data $.Lstring.121 = align 1 { b "g_84", z 1, } +data $.Lstring.122 = align 1 { b "g_115", z 1, } +data $.Lstring.123 = align 1 { b "g_130.f0", z 1, } +data $.Lstring.124 = align 1 { b "g_130.f1", z 1, } +data $.Lstring.125 = align 1 { b "g_130.f2", z 1, } +data $.Lstring.126 = align 1 { b "g_130.f3", z 1, } +data $.Lstring.127 = align 1 { b "g_130.f4", z 1, } +data $.Lstring.128 = align 1 { b "g_132[i]", z 1, } +data $.Lstring.129 = align 1 { b "index = [%d]\012", z 1, } +data $.Lstring.130 = align 1 { b "g_185.f0", z 1, } +data $.Lstring.131 = align 1 { b "g_185.f1", z 1, } +data $.Lstring.132 = align 1 { b "g_185.f2", z 1, } +data $.Lstring.133 = align 1 { b "g_185.f3", z 1, } +data $.Lstring.134 = align 1 { b "g_185.f4", z 1, } +data $.Lstring.135 = align 1 { b "g_185.f5", z 1, } +data $.Lstring.136 = align 1 { b "g_185.f6", z 1, } +data $.Lstring.137 = align 1 { b "g_185.f7", z 1, } +data $.Lstring.138 = align 1 { b "g_185.f8", z 1, } +data $.Lstring.139 = align 1 { b "g_265.f0", z 1, } +data $.Lstring.140 = align 1 { b "g_265.f1", z 1, } +data $.Lstring.141 = align 1 { b "g_265.f2", z 1, } +data $.Lstring.142 = align 1 { b "g_265.f3", z 1, } +data $.Lstring.143 = align 1 { b "g_265.f4", z 1, } +data $.Lstring.144 = align 1 { b "g_265.f5", z 1, } +data $.Lstring.145 = align 1 { b "g_265.f6", z 1, } +data $.Lstring.146 = 
align 1 { b "g_265.f7", z 1, } +data $.Lstring.147 = align 1 { b "g_265.f8", z 1, } +data $.Lstring.148 = align 1 { b "g_399", z 1, } +data $.Lstring.149 = align 1 { b "g_425", z 1, } +data $.Lstring.150 = align 1 { b "g_477", z 1, } +data $.Lstring.151 = align 1 { b "g_518.f0", z 1, } +data $.Lstring.152 = align 1 { b "g_518.f1", z 1, } +data $.Lstring.153 = align 1 { b "g_518.f2", z 1, } +data $.Lstring.154 = align 1 { b "g_518.f3", z 1, } +data $.Lstring.155 = align 1 { b "g_518.f4", z 1, } +data $.Lstring.156 = align 1 { b "g_518.f5", z 1, } +data $.Lstring.157 = align 1 { b "g_518.f6", z 1, } +data $.Lstring.158 = align 1 { b "g_518.f7", z 1, } +data $.Lstring.159 = align 1 { b "g_518.f8", z 1, } +data $.Lstring.160 = align 1 { b "g_566", z 1, } +data $.Lstring.161 = align 1 { b "g_619", z 1, } +data $.Lstring.162 = align 1 { b "g_629", z 1, } +data $.Lstring.163 = align 1 { b "g_631", z 1, } +data $.Lstring.164 = align 1 { b "g_794.f0", z 1, } +data $.Lstring.165 = align 1 { b "g_794.f1", z 1, } +data $.Lstring.166 = align 1 { b "g_794.f2", z 1, } +data $.Lstring.167 = align 1 { b "g_794.f3", z 1, } +data $.Lstring.168 = align 1 { b "g_794.f4", z 1, } +data $.Lstring.169 = align 1 { b "g_858", z 1, } +data $.Lstring.170 = align 1 { b "g_937", z 1, } +data $.Lstring.171 = align 1 { b "g_1018", z 1, } +data $.Lstring.172 = align 1 { b "g_1130", z 1, } +data $.Lstring.173 = align 1 { b "g_1183.f0", z 1, } +data $.Lstring.174 = align 1 { b "g_1183.f1", z 1, } +data $.Lstring.175 = align 1 { b "g_1183.f2", z 1, } +data $.Lstring.176 = align 1 { b "g_1183.f3", z 1, } +data $.Lstring.177 = align 1 { b "g_1183.f4", z 1, } +data $.Lstring.178 = align 1 { b "g_1183.f5", z 1, } +data $.Lstring.179 = align 1 { b "g_1183.f6", z 1, } +data $.Lstring.180 = align 1 { b "g_1183.f7", z 1, } +data $.Lstring.181 = align 1 { b "g_1183.f8", z 1, } +data $.Lstring.182 = align 1 { b "g_1298", z 1, } +data $.Lstring.183 = align 1 { b "g_1393", z 1, } +data $.Lstring.184 = align 1 { b 
"g_1604", z 1, } +data $.Lstring.185 = align 1 { b "g_1617", z 1, } +data $.Lstring.186 = align 1 { b "g_1645", z 1, } +data $.Lstring.187 = align 1 { b "g_1922", z 1, } +data $.Lstring.188 = align 1 { b "g_1972", z 1, } +data $.Lstring.189 = align 1 { b "g_2013", z 1, } +data $.Lstring.190 = align 1 { b "g_2028", z 1, } +data $.Lstring.191 = align 1 { b "g_2102", z 1, } +export +function w $main(w %.1, l %.3) { +@start.1500 + %.2 =l alloc4 4 + storew %.1, %.2 + %.4 =l alloc8 8 + storel %.3, %.4 + %.5 =l alloc4 4 + %.6 =l alloc4 4 + %.7 =l alloc4 4 + %.8 =l alloc4 4 +@body.1501 + %.9 =l add %.8, 0 + storew 0, %.9 + %.10 =w loadsw %.2 + %.11 =w ceqw %.10, 2 + %.12 =w cnew %.11, 0 + jnz %.12, @logic_right.1502, @logic_join.1503 +@logic_right.1502 + %.13 =l loadl %.4 + %.14 =l extsw 1 + %.15 =l mul %.14, 8 + %.16 =l add %.13, %.15 + %.17 =l loadl %.16 + %.18 =l copy %.17 + %.19 =l copy $.Lstring.109 + %.20 =w call $strcmp(l %.18, l %.19) + %.21 =w ceqw %.20, 0 + %.22 =w cnew %.21, 0 +@logic_join.1503 + %.23 =w phi @body.1501 %.12, @logic_right.1502 %.22 + %.24 =w cnew %.23, 0 + jnz %.24, @if_true.1504, @if_false.1505 +@if_true.1504 + storew 1, %.8 +@if_false.1505 + call $platform_main_begin() + call $crc32_gentab() + %.25 =l call $func_1() + %.26 =w loadsb $g_2 + %.27 =l extsb %.26 + %.28 =w loadsw %.8 + call $transparent_crc(l %.27, l $.Lstring.110, w %.28) + storew 0, %.5 +@for_cond.1506 + %.29 =w loadsw %.5 + %.30 =w csltw %.29, 9 + jnz %.30, @for_body.1507, @for_join.1509 +@for_body.1507 + storew 0, %.6 +@for_cond.1510 + %.31 =w loadsw %.6 + %.32 =w csltw %.31, 3 + jnz %.32, @for_body.1511, @for_join.1513 +@for_body.1511 + storew 0, %.7 +@for_cond.1514 + %.33 =w loadsw %.7 + %.34 =w csltw %.33, 1 + jnz %.34, @for_body.1515, @for_join.1517 +@for_body.1515 + %.35 =w loadsw %.5 + %.36 =l extsw %.35 + %.37 =l mul %.36, 12 + %.38 =l add $g_13, %.37 + %.39 =w loadsw %.6 + %.40 =l extsw %.39 + %.41 =l mul %.40, 4 + %.42 =l add %.38, %.41 + %.43 =w loadsw %.7 + %.44 =l 
extsw %.43 + %.45 =l mul %.44, 4 + %.46 =l add %.42, %.45 + %.47 =w loadsw %.46 + %.48 =l extsw %.47 + %.49 =w loadsw %.8 + call $transparent_crc(l %.48, l $.Lstring.111, w %.49) + %.50 =w loadsw %.8 + %.51 =w cnew %.50, 0 + jnz %.51, @if_true.1518, @if_false.1519 +@if_true.1518 + %.52 =l copy $.Lstring.112 + %.53 =w loadsw %.5 + %.54 =w loadsw %.6 + %.55 =w loadsw %.7 + %.56 =w call $printf(l %.52, ..., w %.53, w %.54, w %.55) +@if_false.1519 +@for_cont.1516 + %.57 =w loadsw %.7 + %.58 =w add %.57, 1 + storew %.58, %.7 + jmp @for_cond.1514 +@for_join.1517 +@for_cont.1512 + %.59 =w loadsw %.6 + %.60 =w add %.59, 1 + storew %.60, %.6 + jmp @for_cond.1510 +@for_join.1513 +@for_cont.1508 + %.61 =w loadsw %.5 + %.62 =w add %.61, 1 + storew %.62, %.5 + jmp @for_cond.1506 +@for_join.1509 + %.63 =w loadsw $g_24 + %.64 =l extsw %.63 + %.65 =w loadsw %.8 + call $transparent_crc(l %.64, l $.Lstring.113, w %.65) + %.66 =w loadub $g_46 + %.67 =l extub %.66 + %.68 =w loadsw %.8 + call $transparent_crc(l %.67, l $.Lstring.114, w %.68) + %.69 =w loadsw $g_50 + %.70 =l extsw %.69 + %.71 =w loadsw %.8 + call $transparent_crc(l %.70, l $.Lstring.115, w %.71) + %.72 =w loadub $g_57 + %.73 =l extub %.72 + %.74 =w loadsw %.8 + call $transparent_crc(l %.73, l $.Lstring.116, w %.74) + %.75 =l loadl $g_58 + %.76 =w loadsw %.8 + call $transparent_crc(l %.75, l $.Lstring.117, w %.76) + %.77 =l loadl $g_80 + %.78 =l copy %.77 + %.79 =w loadsw %.8 + call $transparent_crc(l %.78, l $.Lstring.118, w %.79) + %.80 =w loadsh $g_81 + %.81 =l extsh %.80 + %.82 =w loadsw %.8 + call $transparent_crc(l %.81, l $.Lstring.119, w %.82) + %.83 =l loadl $g_82 + %.84 =l copy %.83 + %.85 =w loadsw %.8 + call $transparent_crc(l %.84, l $.Lstring.120, w %.85) + %.86 =w loaduw $g_84 + %.87 =l extuw %.86 + %.88 =w loadsw %.8 + call $transparent_crc(l %.87, l $.Lstring.121, w %.88) + %.89 =w loaduw $g_115 + %.90 =l extuw %.89 + %.91 =w loadsw %.8 + call $transparent_crc(l %.90, l $.Lstring.122, w %.91) + %.92 =l 
copy $g_130 + %.93 =l mul 0, 1 + %.94 =l add %.92, %.93 + %.95 =l copy %.94 + %.96 =w loadsw %.95 + %.97 =l extsw %.96 + %.98 =w loadsw %.8 + call $transparent_crc(l %.97, l $.Lstring.123, w %.98) + %.99 =l copy $g_130 + %.100 =l mul 4, 1 + %.101 =l add %.99, %.100 + %.102 =l copy %.101 + %.103 =w loaduw %.102 + %.104 =l extuw %.103 + %.105 =w loadsw %.8 + call $transparent_crc(l %.104, l $.Lstring.124, w %.105) + %.106 =l copy $g_130 + %.107 =l mul 8, 1 + %.108 =l add %.106, %.107 + %.109 =l copy %.108 + %.110 =w loadsh %.109 + %.111 =l extsh %.110 + %.112 =w loadsw %.8 + call $transparent_crc(l %.111, l $.Lstring.125, w %.112) + %.113 =l copy $g_130 + %.114 =l mul 12, 1 + %.115 =l add %.113, %.114 + %.116 =l copy %.115 + %.117 =w loadsw %.116 + %.118 =l extsw %.117 + %.119 =w loadsw %.8 + call $transparent_crc(l %.118, l $.Lstring.126, w %.119) + %.120 =l copy $g_130 + %.121 =l mul 16, 1 + %.122 =l add %.120, %.121 + %.123 =l copy %.122 + %.124 =w loaduw %.123 + %.125 =l extuw %.124 + %.126 =w loadsw %.8 + call $transparent_crc(l %.125, l $.Lstring.127, w %.126) + storew 0, %.5 +@for_cond.1520 + %.127 =w loadsw %.5 + %.128 =w csltw %.127, 6 + jnz %.128, @for_body.1521, @for_join.1523 +@for_body.1521 + %.129 =w loadsw %.5 + %.130 =l extsw %.129 + %.131 =l mul %.130, 1 + %.132 =l add $g_132, %.131 + %.133 =w loadsb %.132 + %.134 =l extsb %.133 + %.135 =w loadsw %.8 + call $transparent_crc(l %.134, l $.Lstring.128, w %.135) + %.136 =w loadsw %.8 + %.137 =w cnew %.136, 0 + jnz %.137, @if_true.1524, @if_false.1525 +@if_true.1524 + %.138 =l copy $.Lstring.129 + %.139 =w loadsw %.5 + %.140 =w call $printf(l %.138, ..., w %.139) +@if_false.1525 +@for_cont.1522 + %.141 =w loadsw %.5 + %.142 =w add %.141, 1 + storew %.142, %.5 + jmp @for_cond.1520 +@for_join.1523 + %.143 =l copy $g_185 + %.144 =l mul 0, 1 + %.145 =l add %.143, %.144 + %.146 =l copy %.145 + %.147 =w loadub %.146 + %.148 =l extub %.147 + %.149 =w loadsw %.8 + call $transparent_crc(l %.148, l $.Lstring.130, w 
%.149) + %.150 =l copy $g_185 + %.151 =l mul 8, 1 + %.152 =l add %.150, %.151 + %.153 =l copy %.152 + %.154 =l loadl %.153 + %.155 =l copy %.154 + %.156 =w loadsw %.8 + call $transparent_crc(l %.155, l $.Lstring.131, w %.156) + %.157 =l copy $g_185 + %.158 =l mul 16, 1 + %.159 =l add %.157, %.158 + %.160 =l copy %.159 + %.161 =w loadsw %.160 + %.162 =l extsw %.161 + %.163 =w loadsw %.8 + call $transparent_crc(l %.162, l $.Lstring.132, w %.163) + %.164 =l copy $g_185 + %.165 =l mul 24, 1 + %.166 =l add %.164, %.165 + %.167 =l copy %.166 + %.168 =l loadl %.167 + %.169 =w loadsw %.8 + call $transparent_crc(l %.168, l $.Lstring.133, w %.169) + %.170 =l copy $g_185 + %.171 =l mul 32, 1 + %.172 =l add %.170, %.171 + %.173 =l copy %.172 + %.174 =w loaduw %.173 + %.175 =l extuw %.174 + %.176 =w loadsw %.8 + call $transparent_crc(l %.175, l $.Lstring.134, w %.176) + %.177 =l copy $g_185 + %.178 =l mul 36, 1 + %.179 =l add %.177, %.178 + %.180 =l copy %.179 + %.181 =w loaduw %.180 + %.182 =l extuw %.181 + %.183 =w loadsw %.8 + call $transparent_crc(l %.182, l $.Lstring.135, w %.183) + %.184 =l copy $g_185 + %.185 =l mul 40, 1 + %.186 =l add %.184, %.185 + %.187 =l copy %.186 + %.188 =w loadsw %.187 + %.189 =l extsw %.188 + %.190 =w loadsw %.8 + call $transparent_crc(l %.189, l $.Lstring.136, w %.190) + %.191 =l copy $g_185 + %.192 =l mul 44, 1 + %.193 =l add %.191, %.192 + %.194 =l copy %.193 + %.195 =w loadsw %.194 + %.196 =l extsw %.195 + %.197 =w loadsw %.8 + call $transparent_crc(l %.196, l $.Lstring.137, w %.197) + %.198 =l copy $g_185 + %.199 =l mul 48, 1 + %.200 =l add %.198, %.199 + %.201 =l copy %.200 + %.202 =w loadsw %.201 + %.203 =l extsw %.202 + %.204 =w loadsw %.8 + call $transparent_crc(l %.203, l $.Lstring.138, w %.204) + %.205 =l copy $g_265 + %.206 =l mul 0, 1 + %.207 =l add %.205, %.206 + %.208 =l copy %.207 + %.209 =w loadub %.208 + %.210 =l extub %.209 + %.211 =w loadsw %.8 + call $transparent_crc(l %.210, l $.Lstring.139, w %.211) + %.212 =l copy $g_265 
+ %.213 =l mul 8, 1 + %.214 =l add %.212, %.213 + %.215 =l copy %.214 + %.216 =l loadl %.215 + %.217 =l copy %.216 + %.218 =w loadsw %.8 + call $transparent_crc(l %.217, l $.Lstring.140, w %.218) + %.219 =l copy $g_265 + %.220 =l mul 16, 1 + %.221 =l add %.219, %.220 + %.222 =l copy %.221 + %.223 =w loadsw %.222 + %.224 =l extsw %.223 + %.225 =w loadsw %.8 + call $transparent_crc(l %.224, l $.Lstring.141, w %.225) + %.226 =l copy $g_265 + %.227 =l mul 24, 1 + %.228 =l add %.226, %.227 + %.229 =l copy %.228 + %.230 =l loadl %.229 + %.231 =w loadsw %.8 + call $transparent_crc(l %.230, l $.Lstring.142, w %.231) + %.232 =l copy $g_265 + %.233 =l mul 32, 1 + %.234 =l add %.232, %.233 + %.235 =l copy %.234 + %.236 =w loaduw %.235 + %.237 =l extuw %.236 + %.238 =w loadsw %.8 + call $transparent_crc(l %.237, l $.Lstring.143, w %.238) + %.239 =l copy $g_265 + %.240 =l mul 36, 1 + %.241 =l add %.239, %.240 + %.242 =l copy %.241 + %.243 =w loaduw %.242 + %.244 =l extuw %.243 + %.245 =w loadsw %.8 + call $transparent_crc(l %.244, l $.Lstring.144, w %.245) + %.246 =l copy $g_265 + %.247 =l mul 40, 1 + %.248 =l add %.246, %.247 + %.249 =l copy %.248 + %.250 =w loadsw %.249 + %.251 =l extsw %.250 + %.252 =w loadsw %.8 + call $transparent_crc(l %.251, l $.Lstring.145, w %.252) + %.253 =l copy $g_265 + %.254 =l mul 44, 1 + %.255 =l add %.253, %.254 + %.256 =l copy %.255 + %.257 =w loadsw %.256 + %.258 =l extsw %.257 + %.259 =w loadsw %.8 + call $transparent_crc(l %.258, l $.Lstring.146, w %.259) + %.260 =l copy $g_265 + %.261 =l mul 48, 1 + %.262 =l add %.260, %.261 + %.263 =l copy %.262 + %.264 =w loadsw %.263 + %.265 =l extsw %.264 + %.266 =w loadsw %.8 + call $transparent_crc(l %.265, l $.Lstring.147, w %.266) + %.267 =l loadl $g_399 + %.268 =w loadsw %.8 + call $transparent_crc(l %.267, l $.Lstring.148, w %.268) + %.269 =w loaduh $g_425 + %.270 =l extuh %.269 + %.271 =w loadsw %.8 + call $transparent_crc(l %.270, l $.Lstring.149, w %.271) + %.272 =l loadl $g_477 + %.273 =w 
loadsw %.8 + call $transparent_crc(l %.272, l $.Lstring.150, w %.273) + %.274 =l copy $g_518 + %.275 =l mul 0, 1 + %.276 =l add %.274, %.275 + %.277 =l copy %.276 + %.278 =w loadub %.277 + %.279 =l extub %.278 + %.280 =w loadsw %.8 + call $transparent_crc(l %.279, l $.Lstring.151, w %.280) + %.281 =l copy $g_518 + %.282 =l mul 8, 1 + %.283 =l add %.281, %.282 + %.284 =l copy %.283 + %.285 =l loadl %.284 + %.286 =l copy %.285 + %.287 =w loadsw %.8 + call $transparent_crc(l %.286, l $.Lstring.152, w %.287) + %.288 =l copy $g_518 + %.289 =l mul 16, 1 + %.290 =l add %.288, %.289 + %.291 =l copy %.290 + %.292 =w loadsw %.291 + %.293 =l extsw %.292 + %.294 =w loadsw %.8 + call $transparent_crc(l %.293, l $.Lstring.153, w %.294) + %.295 =l copy $g_518 + %.296 =l mul 24, 1 + %.297 =l add %.295, %.296 + %.298 =l copy %.297 + %.299 =l loadl %.298 + %.300 =w loadsw %.8 + call $transparent_crc(l %.299, l $.Lstring.154, w %.300) + %.301 =l copy $g_518 + %.302 =l mul 32, 1 + %.303 =l add %.301, %.302 + %.304 =l copy %.303 + %.305 =w loaduw %.304 + %.306 =l extuw %.305 + %.307 =w loadsw %.8 + call $transparent_crc(l %.306, l $.Lstring.155, w %.307) + %.308 =l copy $g_518 + %.309 =l mul 36, 1 + %.310 =l add %.308, %.309 + %.311 =l copy %.310 + %.312 =w loaduw %.311 + %.313 =l extuw %.312 + %.314 =w loadsw %.8 + call $transparent_crc(l %.313, l $.Lstring.156, w %.314) + %.315 =l copy $g_518 + %.316 =l mul 40, 1 + %.317 =l add %.315, %.316 + %.318 =l copy %.317 + %.319 =w loadsw %.318 + %.320 =l extsw %.319 + %.321 =w loadsw %.8 + call $transparent_crc(l %.320, l $.Lstring.157, w %.321) + %.322 =l copy $g_518 + %.323 =l mul 44, 1 + %.324 =l add %.322, %.323 + %.325 =l copy %.324 + %.326 =w loadsw %.325 + %.327 =l extsw %.326 + %.328 =w loadsw %.8 + call $transparent_crc(l %.327, l $.Lstring.158, w %.328) + %.329 =l copy $g_518 + %.330 =l mul 48, 1 + %.331 =l add %.329, %.330 + %.332 =l copy %.331 + %.333 =w loadsw %.332 + %.334 =l extsw %.333 + %.335 =w loadsw %.8 + call 
$transparent_crc(l %.334, l $.Lstring.159, w %.335) + %.336 =w loadub $g_566 + %.337 =l extub %.336 + %.338 =w loadsw %.8 + call $transparent_crc(l %.337, l $.Lstring.160, w %.338) + %.339 =w loaduh $g_619 + %.340 =l extuh %.339 + %.341 =w loadsw %.8 + call $transparent_crc(l %.340, l $.Lstring.161, w %.341) + %.342 =w loadsb $g_629 + %.343 =l extsb %.342 + %.344 =w loadsw %.8 + call $transparent_crc(l %.343, l $.Lstring.162, w %.344) + %.345 =w loadsb $g_631 + %.346 =l extsb %.345 + %.347 =w loadsw %.8 + call $transparent_crc(l %.346, l $.Lstring.163, w %.347) + %.348 =l copy $g_794 + %.349 =l mul 0, 1 + %.350 =l add %.348, %.349 + %.351 =l copy %.350 + %.352 =w loadsw %.351 + %.353 =l extsw %.352 + %.354 =w loadsw %.8 + call $transparent_crc(l %.353, l $.Lstring.164, w %.354) + %.355 =l copy $g_794 + %.356 =l mul 4, 1 + %.357 =l add %.355, %.356 + %.358 =l copy %.357 + %.359 =w loaduw %.358 + %.360 =l extuw %.359 + %.361 =w loadsw %.8 + call $transparent_crc(l %.360, l $.Lstring.165, w %.361) + %.362 =l copy $g_794 + %.363 =l mul 8, 1 + %.364 =l add %.362, %.363 + %.365 =l copy %.364 + %.366 =w loadsh %.365 + %.367 =l extsh %.366 + %.368 =w loadsw %.8 + call $transparent_crc(l %.367, l $.Lstring.166, w %.368) + %.369 =l copy $g_794 + %.370 =l mul 12, 1 + %.371 =l add %.369, %.370 + %.372 =l copy %.371 + %.373 =w loadsw %.372 + %.374 =l extsw %.373 + %.375 =w loadsw %.8 + call $transparent_crc(l %.374, l $.Lstring.167, w %.375) + %.376 =l copy $g_794 + %.377 =l mul 16, 1 + %.378 =l add %.376, %.377 + %.379 =l copy %.378 + %.380 =w loaduw %.379 + %.381 =l extuw %.380 + %.382 =w loadsw %.8 + call $transparent_crc(l %.381, l $.Lstring.168, w %.382) + %.383 =w loaduh $g_858 + %.384 =l extuh %.383 + %.385 =w loadsw %.8 + call $transparent_crc(l %.384, l $.Lstring.169, w %.385) + %.386 =w loadsb $g_937 + %.387 =l extsb %.386 + %.388 =w loadsw %.8 + call $transparent_crc(l %.387, l $.Lstring.170, w %.388) + %.389 =w loaduw $g_1018 + %.390 =l extuw %.389 + %.391 =w loadsw 
%.8 + call $transparent_crc(l %.390, l $.Lstring.171, w %.391) + %.392 =w loadsb $g_1130 + %.393 =l extsb %.392 + %.394 =w loadsw %.8 + call $transparent_crc(l %.393, l $.Lstring.172, w %.394) + %.395 =l copy $g_1183 + %.396 =l mul 0, 1 + %.397 =l add %.395, %.396 + %.398 =l copy %.397 + %.399 =w loadub %.398 + %.400 =l extub %.399 + %.401 =w loadsw %.8 + call $transparent_crc(l %.400, l $.Lstring.173, w %.401) + %.402 =l copy $g_1183 + %.403 =l mul 8, 1 + %.404 =l add %.402, %.403 + %.405 =l copy %.404 + %.406 =l loadl %.405 + %.407 =l copy %.406 + %.408 =w loadsw %.8 + call $transparent_crc(l %.407, l $.Lstring.174, w %.408) + %.409 =l copy $g_1183 + %.410 =l mul 16, 1 + %.411 =l add %.409, %.410 + %.412 =l copy %.411 + %.413 =w loadsw %.412 + %.414 =l extsw %.413 + %.415 =w loadsw %.8 + call $transparent_crc(l %.414, l $.Lstring.175, w %.415) + %.416 =l copy $g_1183 + %.417 =l mul 24, 1 + %.418 =l add %.416, %.417 + %.419 =l copy %.418 + %.420 =l loadl %.419 + %.421 =w loadsw %.8 + call $transparent_crc(l %.420, l $.Lstring.176, w %.421) + %.422 =l copy $g_1183 + %.423 =l mul 32, 1 + %.424 =l add %.422, %.423 + %.425 =l copy %.424 + %.426 =w loaduw %.425 + %.427 =l extuw %.426 + %.428 =w loadsw %.8 + call $transparent_crc(l %.427, l $.Lstring.177, w %.428) + %.429 =l copy $g_1183 + %.430 =l mul 36, 1 + %.431 =l add %.429, %.430 + %.432 =l copy %.431 + %.433 =w loaduw %.432 + %.434 =l extuw %.433 + %.435 =w loadsw %.8 + call $transparent_crc(l %.434, l $.Lstring.178, w %.435) + %.436 =l copy $g_1183 + %.437 =l mul 40, 1 + %.438 =l add %.436, %.437 + %.439 =l copy %.438 + %.440 =w loadsw %.439 + %.441 =l extsw %.440 + %.442 =w loadsw %.8 + call $transparent_crc(l %.441, l $.Lstring.179, w %.442) + %.443 =l copy $g_1183 + %.444 =l mul 44, 1 + %.445 =l add %.443, %.444 + %.446 =l copy %.445 + %.447 =w loadsw %.446 + %.448 =l extsw %.447 + %.449 =w loadsw %.8 + call $transparent_crc(l %.448, l $.Lstring.180, w %.449) + %.450 =l copy $g_1183 + %.451 =l mul 48, 1 + 
%.452 =l add %.450, %.451 + %.453 =l copy %.452 + %.454 =w loadsw %.453 + %.455 =l extsw %.454 + %.456 =w loadsw %.8 + call $transparent_crc(l %.455, l $.Lstring.181, w %.456) + %.457 =w loaduw $g_1298 + %.458 =l extuw %.457 + %.459 =w loadsw %.8 + call $transparent_crc(l %.458, l $.Lstring.182, w %.459) + %.460 =w loaduw $g_1393 + %.461 =l extuw %.460 + %.462 =w loadsw %.8 + call $transparent_crc(l %.461, l $.Lstring.183, w %.462) + %.463 =l loadl $g_1604 + %.464 =w loadsw %.8 + call $transparent_crc(l %.463, l $.Lstring.184, w %.464) + %.465 =w loaduh $g_1617 + %.466 =l extuh %.465 + %.467 =w loadsw %.8 + call $transparent_crc(l %.466, l $.Lstring.185, w %.467) + %.468 =w loadsw $g_1645 + %.469 =l extsw %.468 + %.470 =w loadsw %.8 + call $transparent_crc(l %.469, l $.Lstring.186, w %.470) + %.471 =w loadsh $g_1922 + %.472 =l extsh %.471 + %.473 =w loadsw %.8 + call $transparent_crc(l %.472, l $.Lstring.187, w %.473) + %.474 =l loadl $g_1972 + %.475 =l copy %.474 + %.476 =w loadsw %.8 + call $transparent_crc(l %.475, l $.Lstring.188, w %.476) + %.477 =w loaduw $g_2013 + %.478 =l extuw %.477 + %.479 =w loadsw %.8 + call $transparent_crc(l %.478, l $.Lstring.189, w %.479) + %.480 =l loadl $g_2028 + %.481 =l copy %.480 + %.482 =w loadsw %.8 + call $transparent_crc(l %.481, l $.Lstring.190, w %.482) + %.483 =w loaduh $g_2102 + %.484 =l extuh %.483 + %.485 =w loadsw %.8 + call $transparent_crc(l %.484, l $.Lstring.191, w %.485) + %.486 =l loadl $crc32_context + %.487 =l copy 4294967295 + %.488 =l xor %.486, %.487 + %.489 =w loadsw %.8 + call $platform_main_end(l %.488, w %.489) + ret 0 +} +export data $crc32_context = align 8 { z 8 } +data $__undefined = align 8 { z 8 } diff --git a/src/qbe/test/_spill1.ssa b/src/qbe/test/_spill1.ssa new file mode 100644 index 00000000..df5e4c28 --- /dev/null +++ b/src/qbe/test/_spill1.ssa @@ -0,0 +1,22 @@ +# test with NReg == 3 +# there must be a spill +# happening on %c +# +# if you replace the sub +# by an add or comment +# the two 
marked lines +# there should be no +# spill +# + +function $test() { +@start + %f =w copy 0 # here + %b =w copy 1 + %c =w copy 2 + %a =w sub %b, %c + %d =w copy %b + %e =w copy %f # and there + %g =w copy %a + ret +} diff --git a/src/qbe/test/_spill2.ssa b/src/qbe/test/_spill2.ssa new file mode 100644 index 00000000..d462d0bf --- /dev/null +++ b/src/qbe/test/_spill2.ssa @@ -0,0 +1,22 @@ +# stupid spilling test + +function $test() { +@start + %x1 =w copy 10 + %x2 =w add %x1, %x1 + %x3 =w sub %x2, %x1 + %x4 =w add %x3, %x1 + %x5 =w sub %x4, %x1 + %x6 =w add %x5, %x1 + %x7 =w sub %x6, %x1 + %x8 =w add %x7, %x1 + %x9 =w sub %x8, %x8 + %x10 =w add %x9, %x7 + %x11 =w sub %x10, %x6 + %x12 =w add %x11, %x5 + %x13 =w sub %x12, %x4 + %x14 =w add %x13, %x3 + %x15 =w sub %x14, %x2 + %x16 =w add %x15, %x1 + ret +} diff --git a/src/qbe/test/_spill3.ssa b/src/qbe/test/_spill3.ssa new file mode 100644 index 00000000..cdfda2d3 --- /dev/null +++ b/src/qbe/test/_spill3.ssa @@ -0,0 +1,24 @@ +# make sure comparisons +# never get their two +# operands in memory +# run with NReg == 3, or +# adapt it! 
+ +function $test() { +@start + %a =w loadw $a + %b =w loadw $a + +@loop + %c =w phi @start 0, @loop %f + %d =w phi @start 0, @loop %g + %e =w phi @start 0, @loop %h + %f =w add %c, %d + %g =w add %c, %e + %h =w add %e, %d + %x =w cslew %a, %b + jnz %x, @loop, @end + +@end + ret +} diff --git a/src/qbe/test/abi1.ssa b/src/qbe/test/abi1.ssa new file mode 100644 index 00000000..049f10e3 --- /dev/null +++ b/src/qbe/test/abi1.ssa @@ -0,0 +1,60 @@ +# test calling into C with two +# large struct arguments (passed +# on the stack) + +type :mem = { b 17 } + +function $alpha(l %p, w %l, l %n) { +@ini + %pe =l add %p, %n +@lop + %p1 =l phi @ini %p, @lop %p2 + %l1 =w phi @ini %l, @lop %l2 + storeb %l1, %p1 + %p2 =l add %p1, 1 + %l2 =w add %l1, 1 + %c1 =w ceql %p1, %pe + jnz %c1, @end, @lop +@end + storeb 0, %pe + ret +} + +export +function $test() { +@start + %p =l alloc4 17 + %q =l alloc4 17 + %r0 =w call $alpha(l %p, w 65, l 16) + %r1 =w call $alpha(l %q, w 97, l 16) + %r2 =w call $fcb(:mem %p, w 1, w 2, w 3, w 4, w 5, w 6, w 7, w 8, w 9, :mem %q) + ret +} + + +# >>> driver +# #include +# typedef struct { char t[17]; } mem; +# extern void test(); +# void fcb(mem m, int i1, int i2, int i3, int i4, int i5, int i6, int i7, int i8, int i9, mem n) { +# printf("fcb: m = (mem){ t = \"%s\" }\n", m.t); +# printf(" n = (mem){ t = \"%s\" }\n", n.t); +# #define T(n) printf(" i%d = %d\n", n, i##n); +# T(1) T(2) T(3) T(4) T(5) T(6) T(7) T(8) T(9) +# } +# int main() { test(); return 0; } +# <<< + +# >>> output +# fcb: m = (mem){ t = "ABCDEFGHIJKLMNOP" } +# n = (mem){ t = "abcdefghijklmnop" } +# i1 = 1 +# i2 = 2 +# i3 = 3 +# i4 = 4 +# i5 = 5 +# i6 = 6 +# i7 = 7 +# i8 = 8 +# i9 = 9 +# <<< diff --git a/src/qbe/test/abi2.ssa b/src/qbe/test/abi2.ssa new file mode 100644 index 00000000..42a3baeb --- /dev/null +++ b/src/qbe/test/abi2.ssa @@ -0,0 +1,19 @@ +type :fps = { s, b, s } + +export +function s $sum(:fps %p) { +@start + %f1 =s load %p + %p8 =l add 8, %p + %f2 =s load %p8 + %s =s add %f1, 
%f2 + ret %s +} + +# >>> driver +# typedef struct { float f1; char b; float f2; } fps; +# extern float sum(fps); +# int main() { fps x = { 1.23, -1, 2.34 }; return !(sum(x) == 1.23f+2.34f); } +# /* Note the f suffixes above are important +# * otherwise C does double operations. */ +# <<< diff --git a/src/qbe/test/abi3.ssa b/src/qbe/test/abi3.ssa new file mode 100644 index 00000000..5ca71f4b --- /dev/null +++ b/src/qbe/test/abi3.ssa @@ -0,0 +1,45 @@ +type :four = {l, b, w} + +data $z = { w 0 } + +export +function $test() { + @start + %a =w loadw $z + %y =w add %a, %a + %yl =l extsw %y + + %s =l alloc8 16 # allocate a :four struct + %s1 =l add %s, 12 # get address of the w + storel 4, %s # set the l + storew 5, %s1 # set the w + + # only the last argument should be on the stack + %f =l add $F, %yl + %x =w call %f(w %y, w 1, w 2, w 3, :four %s, w 6) + + # store the result in the + # global variable a + + %x1 =w add %y, %x + storew %x1, $a + ret +} + +# >>> driver +# #include +# struct four { long long l; char c; int i; }; +# extern void test(void); +# int F(int a0, int a1, int a2, int a3, struct four s, int a6) { +# printf("%d %d %d %d %d %d %d\n", +# a0, a1, a2, a3, (int)s.l, s.i, a6); +# return 42; +# } +# int a; +# int main() { test(); printf("%d\n", a); return 0; } +# <<< + +# >>> output +# 0 1 2 3 4 5 6 +# 42 +# <<< diff --git a/src/qbe/test/abi4.ssa b/src/qbe/test/abi4.ssa new file mode 100644 index 00000000..47255bf9 --- /dev/null +++ b/src/qbe/test/abi4.ssa @@ -0,0 +1,39 @@ +# return a large struct to C + +type :mem = { b 17 } + +function $alpha(l %p, w %l, l %n) { +@ini + %pe =l add %p, %n +@lop + %p1 =l phi @ini %p, @lop %p2 + %l1 =w phi @ini %l, @lop %l2 + storeb %l1, %p1 + %p2 =l add %p1, 1 + %l2 =w add %l1, 1 + %c1 =w ceql %p1, %pe + jnz %c1, @end, @lop +@end + storeb 0, %pe + ret +} + +export +function :mem $test() { +@ini + %p =l alloc4 17 + %r0 =w call $alpha(l %p, w 65, l 16) + ret %p +} + + +# >>> driver +# #include +# typedef struct { char t[17]; } 
mem; +# extern mem test(void); +# int main() { mem m = test(); printf("%s\n", m.t); return 0; } +# <<< + +# >>> output +# ABCDEFGHIJKLMNOP +# <<< diff --git a/src/qbe/test/abi5.ssa b/src/qbe/test/abi5.ssa new file mode 100644 index 00000000..cd786cc2 --- /dev/null +++ b/src/qbe/test/abi5.ssa @@ -0,0 +1,144 @@ +# returning structs from C + +type :st1 = { b 17 } +type :st2 = { w } +type :st3 = { s, w } +type :st4 = { w, d } +type :st5 = { s, l } +type :st6 = { b 16 } +type :st7 = { s, d } +type :st8 = { w 4 } +type :un9 = { { b } { s } } +type :st9 = { w, :un9 } +type :sta = { b, s } +type :stb = { b, b, s } + +data $fmt1 = { b "t1: %s\n", b 0 } +data $fmt2 = { b "t2: %d\n", b 0 } +data $fmt3 = { b "t3: %f %d\n", b 0 } +data $fmt4 = { b "t4: %d %f\n", b 0 } +data $fmt5 = { b "t5: %f %lld\n", b 0 } +data $fmt6 = { b "t6: %s\n", b 0 } +data $fmt7 = { b "t7: %f %f\n", b 0 } +data $fmt8 = { b "t8: %d %d %d %d\n", b 0 } +data $fmt9 = { b "t9: %d %f\n", b 0 } +data $fmta = { b "ta: %d %f\n", b 0 } +data $fmtb = { b "tb: %d %d %f\n", b 0 } + +export +function $test() { +@start + %r1 =:st1 call $t1() + %i1 =w call $printf(l $fmt1, ..., l %r1) + + %r2 =:st2 call $t2() + %w2 =w loadw %r2 + %i2 =w call $printf(l $fmt2, ..., w %w2) + + %r3 =:st3 call $t3() + %s3 =s loads %r3 + %r34 =l add %r3, 4 + %w3 =w loadw %r34 + %p3 =d exts %s3 + %i3 =w call $printf(l $fmt3, ..., d %p3, w %w3) + + %r4 =:st4 call $t4() + %w4 =w loadw %r4 + %r48 =l add 8, %r4 + %d4 =d loadd %r48 + %i4 =w call $printf(l $fmt4, ..., w %w4, d %d4) + + %r5 =:st5 call $t5() + %s5 =s loads %r5 + %d5 =d exts %s5 + %r58 =l add %r5, 8 + %l5 =l loadl %r58 + %i5 =w call $printf(l $fmt5, ..., d %d5, l %l5) + + %r6 =:st6 call $t6() + %i6 =w call $printf(l $fmt6, ..., l %r6) + + %r7 =:st7 call $t7() + %s7 =s loads %r7 + %d71 =d exts %s7 + %r78 =l add %r7, 8 + %d72 =d loadd %r78 + %i7 =w call $printf(l $fmt7, ..., d %d71, d %d72) + + %r8 =:st8 call $t8() + %r84 =l add 4, %r8 + %r88 =l add 4, %r84 + %r812 =l add 4, %r88 + 
%w81 =w loadw %r8 + %w82 =w loadw %r84 + %w83 =w loadw %r88 + %w84 =w loadw %r812 + %i8 =w call $printf(l $fmt8, ..., w %w81, w %w82, w %w83, w %w84) + + %r9 =:st9 call $t9() + %r94 =l add 4, %r9 + %w9 =w loadw %r9 + %s9 =s loads %r94 + %d9 =d exts %s9 + %i9 =w call $printf(l $fmt9, ..., w %w9, d %d9) + + %ra =:sta call $ta() + %ra4 =l add 4, %ra + %wa =w loadsb %ra + %sa =s loads %ra4 + %da =d exts %sa + %ia =w call $printf(l $fmta, ..., w %wa, d %da) + + %rb =:stb call $tb() + %rb1 =l add 1, %rb + %rb4 =l add 4, %rb + %w0b =w loadsb %rb + %w1b =w loadsb %rb1 + %sb =s loads %rb4 + %db =d exts %sb + %ib =w call $printf(l $fmtb, ..., w %w0b, w %w1b, d %db) + + ret +} + + +# >>> driver +# typedef struct { char t[17]; } st1; +# typedef struct { int i; } st2; +# typedef struct { float f; int i; } st3; +# typedef struct { int i; double d; } st4; +# typedef struct { float f; long long l; } st5; +# typedef struct { char t[16]; } st6; +# typedef struct { float f; double d; } st7; +# typedef struct { int i[4]; } st8; +# typedef struct { int i; union { char c; float f; } u; } st9; +# typedef struct { char c; float f; } sta; +# typedef struct { char c0, c1; float f; } stb; +# extern void test(void); +# st1 t1() { return (st1){"abcdefghijklmnop"}; } +# st2 t2() { return (st2){2}; } +# st3 t3() { return (st3){3.0,30}; } +# st4 t4() { return (st4){4,-40}; } +# st5 t5() { return (st5){5.5,-55}; } +# st6 t6() { return (st6){"abcdefghijklmno"}; } +# st7 t7() { return (st7){7.77,77.7}; } +# st8 t8() { return (st8){-8,88,-888,8888}; } +# st9 t9() { return (st9){9,{.f=9.9}}; } +# sta ta() { return (sta){-10,10.1}; } +# stb tb() { return (stb){-1,11,11.11}; } +# int main() { test(); return 0; } +# <<< + +# >>> output +# t1: abcdefghijklmnop +# t2: 2 +# t3: 3.000000 30 +# t4: 4 -40.000000 +# t5: 5.500000 -55 +# t6: abcdefghijklmno +# t7: 7.770000 77.700000 +# t8: -8 88 -888 8888 +# t9: 9 9.900000 +# ta: -10 10.100000 +# tb: -1 11 11.110000 +# <<< diff --git a/src/qbe/test/abi6.ssa 
b/src/qbe/test/abi6.ssa new file mode 100644 index 00000000..da2370cc --- /dev/null +++ b/src/qbe/test/abi6.ssa @@ -0,0 +1,38 @@ +# test arm64's hfa + +data $dfmt = { b "double: %g\n", b 0 } + +type :hfa3 = { s, s, s } + +export +function $f(:hfa3 %h1, :hfa3 %h2, d %d1, :hfa3 %h3, d %d2) { + # the first three parameters should be in 7 registers + # the last two should be on the stack +@start + + call $phfa3(:hfa3 %h1) + call $phfa3(:hfa3 %h2) + call $phfa3(:hfa3 %h3) + call $printf(l $dfmt, ..., d %d1) + call $printf(l $dfmt, ..., d %d2) + ret +} + +# >>> driver +# #include +# typedef struct { float f1, f2, f3; } hfa3; +# void f(hfa3, hfa3, double, hfa3, double); +# void phfa3(hfa3 h) { printf("{ %g, %g, %g }\n", h.f1, h.f2, h.f3); } +# int main() { +# hfa3 h1={1,2,3}, h2={2,3,4}, h3={3,4,5}; +# f(h1, h2, 1, h3, 2); +# } +# <<< + +# >>> output +# { 1, 2, 3 } +# { 2, 3, 4 } +# { 3, 4, 5 } +# double: 1 +# double: 2 +# <<< diff --git a/src/qbe/test/abi7.ssa b/src/qbe/test/abi7.ssa new file mode 100644 index 00000000..193e36a8 --- /dev/null +++ b/src/qbe/test/abi7.ssa @@ -0,0 +1,21 @@ +# test padding calculation with +# embedded struct + +type :s1 = align 4 { w 3 } +type :s2 = align 4 { b 1, :s1 1 } + +export function :s2 $test() { +@start + ret $s +} + +# >>> driver +# struct s2 { +# char x; +# struct { int a[3]; } s1; +# } s = { .x = 123 }; +# extern struct s2 test(void); +# int main(void) { +# return !(test().x == 123); +# } +# <<< diff --git a/src/qbe/test/abi8.ssa b/src/qbe/test/abi8.ssa new file mode 100644 index 00000000..b6bc9419 --- /dev/null +++ b/src/qbe/test/abi8.ssa @@ -0,0 +1,278 @@ +# riscv64 ABI stress +# see tools/abi8.py + +type :fi1 = { h, s } # in a gp & fp pair +type :fi2 = { s, w } # ditto +type :uw = { { w } } +type :fi3 = { s, :uw } # in a single gp reg +type :ss = { s, s } # in two fp regs +type :sd = { s, d } # ditto +type :ww = { w, w } # in a single gp reg +type :lb = { l, b } # in two gp regs +type :big = { b 17 } # by reference +type :ddd 
= { d, d, d} # big hfa on arm64 + +data $ctoqbestr = { b "c->qbe(%d)", b 0 } +data $emptystr = { b 0 } + +export +function $qfn0(s %p0, s %p1, s %p2, s %p3, s %p4, s %p5, s %p6, s %p7, s %p8) { +@start + %r0 =w call $printf(l $ctoqbestr, ..., w 0) + call $ps(s %p8) + %r1 =w call $puts(l $emptystr) + ret +} +export +function $qfn1(w %p0, s %p1, :fi1 %p2) { +@start + %r0 =w call $printf(l $ctoqbestr, ..., w 1) + call $pw(w %p0) + call $ps(s %p1) + call $pfi1(l %p2) + %r1 =w call $puts(l $emptystr) + ret +} +export +function $qfn2(w %p0, :fi2 %p1, s %p2) { +@start + %r0 =w call $printf(l $ctoqbestr, ..., w 2) + call $pw(w %p0) + call $pfi2(l %p1) + call $ps(s %p2) + %r1 =w call $puts(l $emptystr) + ret +} +export +function $qfn3(w %p0, s %p1, :fi3 %p2) { +@start + %r0 =w call $printf(l $ctoqbestr, ..., w 3) + call $pw(w %p0) + call $ps(s %p1) + call $pfi3(l %p2) + %r1 =w call $puts(l $emptystr) + ret +} +export +function $qfn4(:ss %p0) { +@start + %r0 =w call $printf(l $ctoqbestr, ..., w 4) + call $pss(l %p0) + %r1 =w call $puts(l $emptystr) + ret +} +export +function $qfn5(d %p0, d %p1, d %p2, d %p3, d %p4, d %p5, d %p6, :ss %p7, s %p8, l %p9) { +@start + %r0 =w call $printf(l $ctoqbestr, ..., w 5) + call $pss(l %p7) + call $ps(s %p8) + call $pl(l %p9) + %r1 =w call $puts(l $emptystr) + ret +} +export +function $qfn6(:lb %p0) { +@start + %r0 =w call $printf(l $ctoqbestr, ..., w 6) + call $plb(l %p0) + %r1 =w call $puts(l $emptystr) + ret +} +export +function $qfn7(w %p0, w %p1, w %p2, w %p3, w %p4, w %p5, w %p6, :lb %p7) { +@start + %r0 =w call $printf(l $ctoqbestr, ..., w 7) + call $plb(l %p7) + %r1 =w call $puts(l $emptystr) + ret +} +export +function $qfn8(w %p0, w %p1, w %p2, w %p3, w %p4, w %p5, w %p6, w %p7, :lb %p8) { +@start + %r0 =w call $printf(l $ctoqbestr, ..., w 8) + call $plb(l %p8) + %r1 =w call $puts(l $emptystr) + ret +} +export +function $qfn9(:big %p0) { +@start + %r0 =w call $printf(l $ctoqbestr, ..., w 9) + call $pbig(l %p0) + %r1 =w call $puts(l 
$emptystr) + ret +} +export +function $qfn10(w %p0, w %p1, w %p2, w %p3, w %p4, w %p5, w %p6, w %p7, :big %p8, s %p9, l %p10) { +@start + %r0 =w call $printf(l $ctoqbestr, ..., w 10) + call $pbig(l %p8) + call $ps(s %p9) + call $pl(l %p10) + %r1 =w call $puts(l $emptystr) + ret +} +export +function $qfn11(:ddd %p0) { +@start + %r0 =w call $printf(l $ctoqbestr, ..., w 11) + call $pddd(l %p0) + %r1 =w call $puts(l $emptystr) + ret +} + +export +function w $main() { +@start + + call $cfn0(s 0, s 0, s 0, s 0, s 0, s 0, s 0, s 0, s s_9.9) + call $cfn1(w 1, s s_2.2, :fi1 $fi1) + call $cfn2(w 1, :fi2 $fi2, s s_3.3) + call $cfn3(w 1, s s_2.2, :fi3 $fi3) + call $cfn4(:ss $ss) + call $cfn5(d 0, d 0, d 0, d 0, d 0, d 0, d 0, :ss $ss, s s_9.9, l 10) + call $cfn6(:lb $lb) + call $cfn7(w 0, w 0, w 0, w 0, w 0, w 0, w 0, :lb $lb) + call $cfn8(w 0, w 0, w 0, w 0, w 0, w 0, w 0, w 0, :lb $lb) + call $cfn9(:big $big) + call $cfn10(w 0, w 0, w 0, w 0, w 0, w 0, w 0, w 0, :big $big, s s_10.10, l 11) + call $cfn11(:ddd $ddd) + + ret 0 +} + +# >>> driver +# #include +# typedef struct { short h; float s; } Sfi1; +# typedef struct { float s; int w; } Sfi2; +# typedef struct { float s; union { int w; } u; } Sfi3; +# typedef struct { float s0, s1; } Sss; +# typedef struct { float s; double d; } Ssd; +# typedef struct { int w0, w1; } Sww; +# typedef struct { long long l; char b; } Slb; +# typedef struct { char b[17]; } Sbig; +# typedef struct { double d0, d1, d2; } Sddd; +# Sfi1 zfi1, fi1 = { -123, 4.56 }; +# Sfi2 zfi2, fi2 = { 1.23, 456 }; +# Sfi3 zfi3, fi3 = { 3.45, 567 }; +# Sss zss, ss = { 1.23, 45.6 }; +# Ssd zsd, sd = { 2.34, 5.67 }; +# Sww zww, ww = { -123, -456 }; +# Slb zlb, lb = { 123, 'z' }; +# Sbig zbig, big = { "abcdefhijklmnopqr" }; +# Sddd zddd, ddd = { 1.23, 45.6, 7.89 }; +# void pfi1(Sfi1 *s) { printf(" { %d, %g }", s->h, s->s); } +# void pfi2(Sfi2 *s) { printf(" { %g, %d }", s->s, s->w); } +# void pfi3(Sfi3 *s) { printf(" { %g, %d }", s->s, s->u.w); } +# void pss(Sss *s) { 
printf(" { %g, %g }", s->s0, s->s1); } +# void psd(Ssd *s) { printf(" { %g, %g }", s->s, s->d); } +# void pww(Sww *s) { printf(" { %d, %d }", s->w0, s->w1); } +# void plb(Slb *s) { printf(" { %lld, '%c' }", s->l, s->b); } +# void pbig(Sbig *s) { printf(" \"%.17s\"", s->b); } +# void pddd(Sddd *s) { printf(" { %g, %g, %g }", s->d0, s->d1, s->d2); } +# void pw(int w) { printf(" %d", w); } +# void pl(long long l) { printf(" %lld", l); } +# void ps(float s) { printf(" %g", s); } +# void pd(double d) { printf(" %g", d); } +# /* --------------------------- */ +# extern void qfn0(float, float, float, float, float, float, float, float, float); +# void cfn0(float p0, float p1, float p2, float p3, float p4, float p5, float p6, float p7, float p8) { +# printf("qbe->c(%d)", 0); +# ps(p8); puts(""); +# qfn0(p0, p1, p2, p3, p4, p5, p6, p7, p8); +# } +# extern void qfn1(int, float, Sfi1); +# void cfn1(int p0, float p1, Sfi1 p2) { +# printf("qbe->c(%d)", 1); +# pw(p0); ps(p1); pfi1(&p2); puts(""); +# qfn1(p0, p1, p2); +# } +# extern void qfn2(int, Sfi2, float); +# void cfn2(int p0, Sfi2 p1, float p2) { +# printf("qbe->c(%d)", 2); +# pw(p0); pfi2(&p1); ps(p2); puts(""); +# qfn2(p0, p1, p2); +# } +# extern void qfn3(int, float, Sfi3); +# void cfn3(int p0, float p1, Sfi3 p2) { +# printf("qbe->c(%d)", 3); +# pw(p0); ps(p1); pfi3(&p2); puts(""); +# qfn3(p0, p1, p2); +# } +# extern void qfn4(Sss); +# void cfn4(Sss p0) { +# printf("qbe->c(%d)", 4); +# pss(&p0); puts(""); +# qfn4(p0); +# } +# extern void qfn5(double, double, double, double, double, double, double, Sss, float, long long); +# void cfn5(double p0, double p1, double p2, double p3, double p4, double p5, double p6, Sss p7, float p8, long long p9) { +# printf("qbe->c(%d)", 5); +# pss(&p7); ps(p8); pl(p9); puts(""); +# qfn5(p0, p1, p2, p3, p4, p5, p6, p7, p8, p9); +# } +# extern void qfn6(Slb); +# void cfn6(Slb p0) { +# printf("qbe->c(%d)", 6); +# plb(&p0); puts(""); +# qfn6(p0); +# } +# extern void qfn7(int, int, int, int, int, 
int, int, Slb); +# void cfn7(int p0, int p1, int p2, int p3, int p4, int p5, int p6, Slb p7) { +# printf("qbe->c(%d)", 7); +# plb(&p7); puts(""); +# qfn7(p0, p1, p2, p3, p4, p5, p6, p7); +# } +# extern void qfn8(int, int, int, int, int, int, int, int, Slb); +# void cfn8(int p0, int p1, int p2, int p3, int p4, int p5, int p6, int p7, Slb p8) { +# printf("qbe->c(%d)", 8); +# plb(&p8); puts(""); +# qfn8(p0, p1, p2, p3, p4, p5, p6, p7, p8); +# } +# extern void qfn9(Sbig); +# void cfn9(Sbig p0) { +# printf("qbe->c(%d)", 9); +# pbig(&p0); puts(""); +# qfn9(p0); +# } +# extern void qfn10(int, int, int, int, int, int, int, int, Sbig, float, long long); +# void cfn10(int p0, int p1, int p2, int p3, int p4, int p5, int p6, int p7, Sbig p8, float p9, long long p10) { +# printf("qbe->c(%d)", 10); +# pbig(&p8); ps(p9); pl(p10); puts(""); +# qfn10(p0, p1, p2, p3, p4, p5, p6, p7, p8, p9, p10); +# } +# extern void qfn11(Sddd); +# void cfn11(Sddd p0) { +# printf("qbe->c(%d)", 11); +# pddd(&p0); puts(""); +# qfn11(p0); +# } +# <<< + +# >>> output +# qbe->c(0) 9.9 +# c->qbe(0) 9.9 +# qbe->c(1) 1 2.2 { -123, 4.56 } +# c->qbe(1) 1 2.2 { -123, 4.56 } +# qbe->c(2) 1 { 1.23, 456 } 3.3 +# c->qbe(2) 1 { 1.23, 456 } 3.3 +# qbe->c(3) 1 2.2 { 3.45, 567 } +# c->qbe(3) 1 2.2 { 3.45, 567 } +# qbe->c(4) { 1.23, 45.6 } +# c->qbe(4) { 1.23, 45.6 } +# qbe->c(5) { 1.23, 45.6 } 9.9 10 +# c->qbe(5) { 1.23, 45.6 } 9.9 10 +# qbe->c(6) { 123, 'z' } +# c->qbe(6) { 123, 'z' } +# qbe->c(7) { 123, 'z' } +# c->qbe(7) { 123, 'z' } +# qbe->c(8) { 123, 'z' } +# c->qbe(8) { 123, 'z' } +# qbe->c(9) "abcdefhijklmnopqr" +# c->qbe(9) "abcdefhijklmnopqr" +# qbe->c(10) "abcdefhijklmnopqr" 10.1 11 +# c->qbe(10) "abcdefhijklmnopqr" 10.1 11 +# qbe->c(11) { 1.23, 45.6, 7.89 } +# c->qbe(11) { 1.23, 45.6, 7.89 } +# <<< diff --git a/src/qbe/test/abi9.ssa b/src/qbe/test/abi9.ssa new file mode 100644 index 00000000..2fc1036b --- /dev/null +++ b/src/qbe/test/abi9.ssa @@ -0,0 +1,20 @@ +type :obj = { l, l, l, l } + +export +function 
:obj $f(l %self) { +@_0 + %_1 =l alloc8 16 + storel 77, %_1 + ret %_1 +} + +# >>> driver +# #include +# typedef struct { long long a, b, c, d; } obj; +# extern obj f(); +# int main() { obj ret = f(); printf("%lld\n", ret.a); return 0; } +# <<< + +# >>> output +# 77 +# <<< diff --git a/src/qbe/test/alias1.ssa b/src/qbe/test/alias1.ssa new file mode 100644 index 00000000..a064facf --- /dev/null +++ b/src/qbe/test/alias1.ssa @@ -0,0 +1,21 @@ +export function w $main() { +@start + %a =l alloc4 4 + %b =l alloc4 4 + storew 4, %a + storew 5, %b + +@loop + # %mem will be %a and %b successively, + # but we do not know it when processing + # the phi because %b goes through a cpy + %mem =l phi @start %a, @loop %bcpy + + %w =w load %mem + %eq5 =w ceqw %w, 5 + %bcpy =l copy %b + jnz %eq5, @exit, @loop + +@exit + ret 0 +} diff --git a/src/qbe/test/align.ssa b/src/qbe/test/align.ssa new file mode 100644 index 00000000..49f11837 --- /dev/null +++ b/src/qbe/test/align.ssa @@ -0,0 +1,17 @@ +export +function $test() { +@start + %x =l alloc16 16 + %y =l add %x, 8 + %m =w rem %y, 16 + storew %m, %y + %n =w loadw %y + storew %n, $a + ret +} + +# >>> driver +# extern void test(void); +# int a; +# int main() { test(); return !(a == 8 || a == -8); } +# <<< diff --git a/src/qbe/test/cmp1.ssa b/src/qbe/test/cmp1.ssa new file mode 100644 index 00000000..dd5bfa16 --- /dev/null +++ b/src/qbe/test/cmp1.ssa @@ -0,0 +1,17 @@ +# test cmp used in jnz as well as its result value + +export +function w $test(w %c) { +@start + %cmp =w cultw 1, %c + jnz %cmp, @yes, @no +@yes + %cmp =w copy 1 +@no + ret %cmp +} + +# >>> driver +# int test(int); +# int main(void) { return test(0); } +# <<< diff --git a/src/qbe/test/collatz.ssa b/src/qbe/test/collatz.ssa new file mode 100644 index 00000000..73e16ea9 --- /dev/null +++ b/src/qbe/test/collatz.ssa @@ -0,0 +1,62 @@ +# a solution for N=1000 to +# https://projecteuler.net/problem=14 +# we use a fast local array to +# memoize small collatz numbers + +export 
+function $test() { +@start + %mem =l alloc4 4000 +@loop + %n =w phi @start 1, @newm %n9, @oldm %n9 + %cmax =w phi @start 0, @newm %c, @oldm %cmax + %fin =w csltw %n, 1000 + jnz %fin, @cloop, @end +@cloop + %n0 =w phi @loop %n, @odd %n2, @even %n3 + %c0 =w phi @loop 0, @odd %c1, @even %c1 + %no1 =w cnew %n0, 1 + jnz %no1, @iter0, @endcl +@iter0 + %ism =w csltw %n0, %n + jnz %ism, @getmemo, @iter1 +@iter1 + %c1 =w add %c0, 1 + %p =w and %n0, 1 + jnz %p, @odd, @even +@odd + %n1 =w mul 3, %n0 + %n2 =w add %n1, 1 + jmp @cloop +@even + %n3 =w shr %n0, 1 + jmp @cloop +@getmemo # get the count for n0 in mem + %n0l =l extsw %n0 + %idx0 =l mul %n0l, 4 + %loc0 =l add %idx0, %mem + %cn0 =w loadw %loc0 + %c2 =w add %c0, %cn0 +@endcl # store the count for n in mem + %c =w phi @getmemo %c2, @cloop %c0 + %nl =l extsw %n + %idx1 =l mul %nl, 4 + %loc1 =l add %idx1, %mem + storew %c, %loc1 + %n9 =w add 1, %n + %big =w cslew %cmax, %c + jnz %big, @newm, @oldm +@newm + jmp @loop +@oldm + jmp @loop +@end + storew %cmax, $a + ret +} + +# >>> driver +# extern void test(void); +# int a; +# int main() { test(); return !(a == 178); } +# <<< diff --git a/src/qbe/test/conaddr.ssa b/src/qbe/test/conaddr.ssa new file mode 100644 index 00000000..0ded18ee --- /dev/null +++ b/src/qbe/test/conaddr.ssa @@ -0,0 +1,76 @@ +# skip amd64_win (no signals on win32) +# test amd64 addressing modes + +export +function w $f0(l %o) { +@start + %addr =l add $a, %o + %char =w loadub %addr + ret %char +} + +export +function w $f1(l %o) { +@start + %o1 =l mul %o, 1 + %addr =l add 10, %o1 + %char =w loadub %addr + ret %char +} + +export +function w $f2(l %o1, l %o2) { +@start + %o22 =l mul %o2, 2 + %o =l add %o1, %o22 + %addr =l add $a, %o + %char =w loadub %addr + ret %char +} + +export +function l $f3(l %o) { +@start + %addr =l add %o, $a + ret %addr +} + +export +function $f4() { +@start + storel $p, $p + ret +} + +export +function $writeto0() { +@start + storel 0, 0 + ret +} + +# >>> driver +# #include +# 
#include +# char a[] = "qbe rocks"; +# void *p; +# int ok; +# extern unsigned f0(long), f1(long), f2(long, long); +# extern char *f3(long); +# extern void f4(), writeto0(); +# void h(int sig, siginfo_t *si, void *unused) { +# ok += si->si_addr == 0; +# exit(!(ok == 6)); +# } +# int main() { +# struct sigaction sa = {.sa_flags=SA_SIGINFO, .sa_sigaction=h}; +# sigemptyset(&sa.sa_mask); sigaction(SIGSEGV, &sa, 0); +# ok += f0(2) == 'e'; +# ok += f1((long)a-5) == 'o'; +# ok += f2(4, 2) == 's'; +# ok += *f3(0) == 'q'; +# f4(); +# ok += p == &p; +# writeto0(); /* will segfault */ +# } +# <<< diff --git a/src/qbe/test/copy.ssa b/src/qbe/test/copy.ssa new file mode 100644 index 00000000..5c2a4d03 --- /dev/null +++ b/src/qbe/test/copy.ssa @@ -0,0 +1,15 @@ +export function w $f() { +@start + %x0 =w loadsb $a + # the extension must not be eliminated + # even though the load already extended + %x1 =l extsb %x0 + %c =w ceql %x1, -1 + ret %c +} + +# >>> driver +# char a = -1; +# extern int f(); +# int main() { return !(f() == 1); } +# <<< diff --git a/src/qbe/test/cprime.ssa b/src/qbe/test/cprime.ssa new file mode 100644 index 00000000..8fadef3c --- /dev/null +++ b/src/qbe/test/cprime.ssa @@ -0,0 +1,104 @@ +# generated by Andrew Chambers' +# compiler from the C program +# following in comments + +export +function w $main() { +@start + %v0 =l alloc8 4 + %v1 =l alloc8 4 + %v2 =l alloc8 4 + %v3 =l alloc8 4 + %v4 =l alloc8 4 + storew 5, %v1 + storew 11, %v2 + storew 12, %v3 +@L0 + %v5 =w loadw %v1 + %v6 =w cnew %v5, 201 + jnz %v6, @L8, @L1 +@L8 + storew 1, %v4 + %v7 =w loadw %v3 + %v8 =w rem %v7, 2 + %v9 =w ceqw %v8, 0 + jnz %v9, @L9, @L5 +@L9 + storew 0, %v4 +@L5 + storew 3, %v0 +@L2 + %v10 =w loadw %v0 + %v11 =w loadw %v3 + %v12 =w csltw %v10, %v11 + jnz %v12, @L10, @L3 +@L10 + %v13 =w loadw %v3 + %v14 =w loadw %v0 + %v15 =w rem %v13, %v14 + %v16 =w ceqw %v15, 0 + jnz %v16, @L11, @L4 +@L11 + storew 0, %v4 + jmp @L3 +@L4 + %v17 =w loadw %v0 + %v18 =w add %v17, 2 + storew %v18, %v0 
+ jmp @L2 +@L3 + %v19 =w loadw %v4 + jnz %v19, @L12, @L6 +@L12 + %v20 =w loadw %v3 + storew %v20, %v2 + %v21 =w loadw %v1 + %v22 =w add %v21, 1 + storew %v22, %v1 +@L6 + %v23 =w loadw %v3 + %v24 =w add %v23, 1 + storew %v24, %v3 + jmp @L0 +@L1 + %v25 =w loadw %v2 + %v26 =w cnew %v25, 1229 + jnz %v26, @L13, @L7 +@L13 + ret 1 +@L7 + ret 0 +@end + ret 0 +} + +# int +# main() +# { +# int i, n, p, next, isprime; +# +# n = 5; +# p = 11; +# next = 12; +# while(n != 201) { +# isprime = 1; +# if(next % 2 == 0) { +# isprime = 0; +# } else { +# for(i = 3; i < next; i = i + 2) { +# if(next % i == 0) { +# isprime = 0; +# break; +# } +# } +# } +# if(isprime) { +# p = next; +# n = n + 1; +# } +# next = next + 1; +# } +# if(p != 1229) +# return 1; +# return 0; +# } diff --git a/src/qbe/test/cup.ssa b/src/qbe/test/cup.ssa new file mode 100644 index 00000000..b53c86e5 --- /dev/null +++ b/src/qbe/test/cup.ssa @@ -0,0 +1,18 @@ +# counts up from -1988 to 1991 + +export +function $test() { +@start +@loop + %n0 =l phi @start -1988, @loop %n1 + %n1 =l add 1, %n0 + %cmp =w cslel 1991, %n1 + jnz %cmp, @end, @loop +@end + ret +} + +# >>> driver +# extern void test(void); +# int main() { test(); return 0; } +# <<< diff --git a/src/qbe/test/dark.ssa b/src/qbe/test/dark.ssa new file mode 100644 index 00000000..c508e486 --- /dev/null +++ b/src/qbe/test/dark.ssa @@ -0,0 +1,32 @@ +# skip arm64 arm64_apple rv64 amd64_win +# a hack example, +# we use a dark type to get +# a pointer to the stack. + +type :magic = align 1 { 0 } + +data $ret = { l 0 } + +export +function $test(:magic %p) { +@start + %av =w loadw $a + %a1 =w add 1, %av + storew %a1, $a # increment $a + %r1 =l loadl $ret # fetch from $ret + %p1 =l add %p, -8 + %r2 =l loadl %p1 # get the return address + storel %r2, $ret # store it in $ret + %c =w ceql %r1, %r2 + jnz %c, @fin, @cal +@cal + %i =w call $test() # no argument given, intentionally! 
+@fin + ret +} + +# >>> driver +# extern void test(void); +# int a = 2; +# int main() { test(); return !(a == 5); } +# <<< diff --git a/src/qbe/test/double.ssa b/src/qbe/test/double.ssa new file mode 100644 index 00000000..ac6c4c56 --- /dev/null +++ b/src/qbe/test/double.ssa @@ -0,0 +1,25 @@ +export +function $test() { +@start + %x1 =d copy d_0.1 + %x2 =d add d_0.2, %x1 + %x3 =d sub %x2, d_0.3 + +@loop + %x4 =d phi @start %x3, @loop %x5 + %i1 =w phi @start 0, @loop %i2 + %x5 =d add %x4, %x4 + %i2 =w add %i1, 1 + %c0 =w cled %x5, 4607182418800017408 # d_1.0 + jnz %c0, @loop, @end + +@end + storew %i2, $a + ret +} + +# >>> driver +# extern void test(void); +# int a; +# int main() { test(); return !(a == 55); } +# <<< diff --git a/src/qbe/test/dynalloc.ssa b/src/qbe/test/dynalloc.ssa new file mode 100644 index 00000000..7c54e887 --- /dev/null +++ b/src/qbe/test/dynalloc.ssa @@ -0,0 +1,27 @@ +# make sure dynamic allocations +# and caller-save regs interact +# soundly + +function $g() { +@start + ret +} + +function w $f(w %arg) { +@start + call $g() +@alloc + %r =l alloc8 16 + storel 180388626474, %r + %r8 =l add 8, %r + storel 180388626474, %r8 + ret %arg +} + +export +function w $main() { +@start + %a =w call $f(w 0) + %b =w call $f(w 0) + ret %a +} diff --git a/src/qbe/test/echo.ssa b/src/qbe/test/echo.ssa new file mode 100644 index 00000000..5e48b0e9 --- /dev/null +++ b/src/qbe/test/echo.ssa @@ -0,0 +1,33 @@ +export +function w $main(w %argc, l %argv) { +@start + %fmt =l alloc8 8 + storel 1663398693, %fmt # "%s%c" + %av0 =l add %argv, 8 + %ac0 =w sub %argc, 1 +@loop + %av =l phi @start %av0, @loop2 %av1 + %ac =w phi @start %ac0, @loop2 %ac1 + %c0 =w ceqw %ac, 0 + jnz %c0, @end, @loop1 +@loop1 + %c1 =w ceqw %ac, 1 + jnz %c1, @last, @nolast +@last + jmp @loop2 +@nolast + jmp @loop2 +@loop2 + %sep =w phi @last 10, @nolast 32 + %arg =l loadl %av + %r =w call $printf(l %fmt, ..., l %arg, w %sep) + %av1 =l add %av, 8 + %ac1 =w sub %ac, 1 + jmp @loop +@end + ret 0 +} + +# 
>>> output +# a b c +# <<< diff --git a/src/qbe/test/env.ssa b/src/qbe/test/env.ssa new file mode 100644 index 00000000..7a7bb58c --- /dev/null +++ b/src/qbe/test/env.ssa @@ -0,0 +1,21 @@ +# sanity checks for env calls + +function l $epar(env %e, l %i) { +@start + %x =l add %e, %i + ret %x +} + +export function l $earg(l %a, l %b) { +@start + %r1 =l call $epar(env %a, l %b) + # okay to call a regular function + # with an env argument + %r2 =l call $labs(env 113, l %r1) + ret %r2 +} + +# >>> driver +# extern long earg(long, long); +# int main(void) { return !(earg(2, -44) == 42); } +# <<< diff --git a/src/qbe/test/eucl.ssa b/src/qbe/test/eucl.ssa new file mode 100644 index 00000000..838c1b8c --- /dev/null +++ b/src/qbe/test/eucl.ssa @@ -0,0 +1,25 @@ +# euclide's algorithm in ssa +# it is a fairly interesting +# ssa program because of the +# swap of b and a + +export +function $test() { +@start + +@loop + %a =w phi @start 380, @loop %r + %b =w phi @start 747, @loop %a + %r =w rem %b, %a + jnz %r, @loop, @end + +@end + storew %a, $a + ret +} + +# >>> driver +# extern void test(void); +# int a; +# int main() { test(); return !(a == 1); } +# <<< diff --git a/src/qbe/test/euclc.ssa b/src/qbe/test/euclc.ssa new file mode 100644 index 00000000..34492341 --- /dev/null +++ b/src/qbe/test/euclc.ssa @@ -0,0 +1,30 @@ +export +function w $test() { +@l0 + %a =l alloc4 4 + %b =l alloc4 4 + %r =l alloc4 4 + storew 747, %a + storew 380, %b +@l1 + %t4 =w loadw %b + jnz %t4, @l2, @l3 +@l2 + %t7 =w loadw %a + %t8 =w loadw %b + %t6 =w rem %t7, %t8 + storew %t6, %r + %t10 =w loadw %b + storew %t10, %a + %t12 =w loadw %r + storew %t12, %b + jmp @l1 +@l3 + %t13 =w loadw %a + ret %t13 +} + +# >>> driver +# extern int test(void); +# int main() { return !(test() == 1); } +# <<< diff --git a/src/qbe/test/fixarg.ssa b/src/qbe/test/fixarg.ssa new file mode 100644 index 00000000..85824fda --- /dev/null +++ b/src/qbe/test/fixarg.ssa @@ -0,0 +1,15 @@ +# regression test for 3bec2c60 + +export 
+function w $test() { +@start + %x1 =l alloc8 8 + %x2 =l alloc8 8 + %r =w cnel %x1, %x2 # both operands need fixing + ret %r +} + +# >>> driver +# extern int test(); +# int main() { return !(test() == 1); } +# <<< diff --git a/src/qbe/test/fold1.ssa b/src/qbe/test/fold1.ssa new file mode 100644 index 00000000..9fb5d71a --- /dev/null +++ b/src/qbe/test/fold1.ssa @@ -0,0 +1,47 @@ +export +function w $f1() { +@start + %x =w sar 2147483648, 31 + ret %x +} + +export +function w $f2() { +@start + %x =w div 4294967040, 8 # -256 / 8 + ret %x +} + +export +function w $f3() { +@start + %x =w rem 4294967284, 7 # -12 % 7 + ret %x +} + +export +function w $f4() { +@start + %x =w shr 4294967296, 1 # 2^32 >> 1 + ret %x +} + +export +function w $f5() { +@start + %x =w udiv 1, 4294967297 # 1 / (2^32 + 1) + ret %x +} + +export +function w $f6() { +@start + %x =w urem 4294967296, 7 # 2^32 % 7 + ret %x +} + +# >>> driver +# extern int f1(), f2(), f3(), f4(), f5(), f6(); +# int main() { return !(f1() == -1 && f2() == -32 && f3() == -5 && +# f4() == 0 && f5() == 1 && f6() == 0); } +# <<< diff --git a/src/qbe/test/fpcnv.ssa b/src/qbe/test/fpcnv.ssa new file mode 100644 index 00000000..3fe078f7 --- /dev/null +++ b/src/qbe/test/fpcnv.ssa @@ -0,0 +1,134 @@ +# floating point casts and conversions + +export +function s $fneg(s %f) { +@fneg + %b0 =w cast %f + %b1 =w xor 2147483648, %b0 + %rs =s cast %b1 + ret %rs +} + +export +function d $ftrunc(d %f) { +@ftrunc + %l0 =w dtosi %f + %rt =d swtof %l0 + ret %rt +} + +export +function s $wtos(w %w) { +@start + %rt =s uwtof %w + ret %rt +} +export +function d $wtod(w %w) { +@start + %rt =d uwtof %w + ret %rt +} + +export +function s $ltos(l %l) { +@start + %rt =s ultof %l + ret %rt +} +export +function d $ltod(l %l) { +@start + %rt =d ultof %l + ret %rt +} + +export +function w $stow(s %f) { +@start + %rt =w stoui %f + ret %rt +} +export +function w $dtow(d %f) { +@start + %rt =w dtoui %f + ret %rt +} + +export +function l $stol(s %f) { +@start + 
%rt =l stoui %f + ret %rt +} +export +function l $dtol(d %f) { +@start + %rt =l dtoui %f + ret %rt +} + + + +# >>> driver +# #include +# #include +# +# extern float fneg(float); +# extern double ftrunc(double); +# +# extern float wtos(unsigned int); +# extern double wtod(unsigned int); +# extern float ltos(long long unsigned int); +# extern double ltod(long long unsigned int); +# +# extern unsigned int stow(float); +# extern unsigned int dtow(double); +# extern unsigned long long stol(float); +# extern unsigned long long dtol(double); +# +# unsigned long long iin[] = { 0, 1, 16, 234987, 427386245, 0x7fff0000, +# 0xffff0000, 23602938196141, 72259248152500195, 9589010795705032704ull, +# 0xdcf5fbe299d0148aull, 0xffffffff00000000ull, -1 }; +# +# double fin[] = { 0.17346516197824458, 442.0760005466251, 4342856.879893436, +# 4294967295.0, 98547543006.49626, 236003043787688.3, 9.499222733527032e+18, +# 1.1936266170755652e+19 }; +# +# int main() { +# int i; +# +# if (fneg(1.23f) != -1.23f) return 1; +# if (ftrunc(3.1415) != 3.0) return 2; +# if (ftrunc(-1.234) != -1.0) return 3; +# +# for (i=0; i= 1LL << DBL_MANT_DIG) +# break; +# if (dtol(fin[i]) != (unsigned long long)fin[i]) +# return 8; +# if((unsigned long long)fin[i] > UINT_MAX) +# continue; +# if (dtow(fin[i]) != (unsigned int)fin[i]) +# return 9; +# if (fin[i] >= 1LL << FLT_MANT_DIG) +# continue; +# if (stol((float)fin[i]) != (unsigned long long)(float)fin[i]) +# return 10; +# if (stow((float)fin[i]) != (unsigned int)(float)fin[i]) +# return 11; +# } +# return 0; +# } +# <<< diff --git a/src/qbe/test/gvn1.ssa b/src/qbe/test/gvn1.ssa new file mode 100644 index 00000000..d47f05b0 --- /dev/null +++ b/src/qbe/test/gvn1.ssa @@ -0,0 +1,19 @@ +export +function w $test(w %p1, w %p2) { +@start +@entry + %t1 =w copy 1 + jnz %t1, @live, @dead1 +@live + %t2 =w add %p1, %p2 + ret %t2 +@dead1 + %t2 =w add %p1, %p2 # live ins in dead blk +@dead2 + jnz %t1, @live, @dead1 +} + +# >>> driver +# extern int test(int p1, int p2); +# 
int main() { return test(1, 2) != 3; } +# <<< diff --git a/src/qbe/test/gvn2.ssa b/src/qbe/test/gvn2.ssa new file mode 100644 index 00000000..33f9a963 --- /dev/null +++ b/src/qbe/test/gvn2.ssa @@ -0,0 +1,31 @@ +# GVN 0/non-0 inference removes @yesyes, @yesno, @noyes, @nono + +export +function w $test(w %c) { +@start + jnz %c, @yes, @no +@yes + %c0 =w cnew %c, 0 + jnz %c0, @yesyes, @yesno +@yesyes + %rc =w copy 1 + jmp @end +@yesno + %rc =w copy 111 + jmp @end +@no + %c1 =w cnew %c, 0 + jnz %c1, @noyes, @nono +@noyes + %rc =w copy 222 + jmp @end +@nono + %rc =w copy 0 +@end + ret %rc +} + +# >>> driver +# int test(int); +# int main(void) { return test(0); } +# <<< diff --git a/src/qbe/test/ifc.ssa b/src/qbe/test/ifc.ssa new file mode 100644 index 00000000..29f4457d --- /dev/null +++ b/src/qbe/test/ifc.ssa @@ -0,0 +1,238 @@ +export +function l $ifc1(l %v0, l %v1, w %c) { +@start + jnz %c, @true, @false +@true + %v =l copy %v1 + jmp @end +@false + %v =l copy %v0 + jmp @end +@end + ret %v +} + +export +function l $ifc2(l %v0, l %v1, w %p) { +@start + %c =w cnew %p, 42 + jnz %c, @true, @false +@true + %v =l copy %v1 + jmp @end +@false + %v =l copy %v0 + jmp @end +@end + ret %v +} + +export +function l $ifc3(l %v0, l %v1, w %p) { +@start + %c =w cugtw %p, 42 + jnz %c, @true, @false +@true + %v =l copy %v1 + jmp @end +@false + %v =l copy %v0 + jmp @end +@end + ret %v +} + +export +function l $ifclts(s %s0, s %s1, l %v0, l %v1) { +@start + %c =w clts %s0, %s1 + jnz %c, @true, @false +@true + %v =l copy %v1 + jmp @end +@false + %v =l copy %v0 + jmp @end +@end + ret %v +} + +export +function l $ifcles(s %s0, s %s1, l %v0, l %v1) { +@start + %c =w cles %s0, %s1 + jnz %c, @true, @false +@true + %v =l copy %v1 + jmp @end +@false + %v =l copy %v0 + jmp @end +@end + ret %v +} + +export +function l $ifcgts(s %s0, s %s1, l %v0, l %v1) { +@start + %c =w cgts %s0, %s1 + jnz %c, @true, @false +@true + %v =l copy %v1 + jmp @end +@false + %v =l copy %v0 + jmp @end +@end + ret %v +} + 
+export +function l $ifcges(s %s0, s %s1, l %v0, l %v1) { +@start + %c =w cges %s0, %s1 + jnz %c, @true, @false +@true + %v =l copy %v1 + jmp @end +@false + %v =l copy %v0 + jmp @end +@end + ret %v +} + +export +function l $ifceqs(s %s0, s %s1, l %v0, l %v1) { +@start + %c =w ceqs %s0, %s1 + jnz %c, @true, @false +@true + %v =l copy %v1 + jmp @end +@false + %v =l copy %v0 + jmp @end +@end + ret %v +} + +export +function l $ifcnes(s %s0, s %s1, l %v0, l %v1) { +@start + %c =w cnes %s0, %s1 + jnz %c, @true, @false +@true + %v =l copy %v1 + jmp @end +@false + %v =l copy %v0 + jmp @end +@end + ret %v +} + +export +function l $ifcos(s %s0, s %s1, l %v0, l %v1) { +@start + %c =w cos %s0, %s1 + jnz %c, @true, @false +@true + %v =l copy %v1 + jmp @end +@false + %v =l copy %v0 + jmp @end +@end + ret %v +} + +export +function l $ifcuos(s %s0, s %s1, l %v0, l %v1) { +@start + %c =w cuos %s0, %s1 + jnz %c, @true, @false +@true + %v =l copy %v1 + jmp @end +@false + %v =l copy %v0 + jmp @end +@end + ret %v +} + +# >>> driver +# extern long ifc1(long, long, int); +# extern long ifc2(long, long, int); +# extern long ifc3(long, long, int); +# extern long ifclts(float, float, long, long); +# extern long ifcles(float, float, long, long); +# extern long ifcgts(float, float, long, long); +# extern long ifcges(float, float, long, long); +# extern long ifceqs(float, float, long, long); +# extern long ifcnes(float, float, long, long); +# extern long ifcos(float, float, long, long); +# extern long ifcuos(float, float, long, long); +# int main() { +# return +# ifc1(7, 5, 0) != 7 +# || ifc1(7, 5, 1) != 5 +# || ifc1(7, 5, 33) != 5 +# || ifc2(7, 5, 42) != 7 +# || ifc2(7, 5, 41) != 5 +# || ifc2(7, 5, 43) != 5 +# || ifc3(7, 5, 42) != 7 +# || ifc3(7, 5, 41) != 7 +# || ifc3(7, 5, 43) != 5 +# || ifclts(5.0f, 6.0f, 7, 5) != 5 +# || ifclts(5.0f, 5.0f, 7, 5) != 7 +# || ifclts(5.0f, 4.0f, 7, 5) != 7 +# || ifclts(5.0f, 0.0f/0.0f, 7, 5) != 7 +# || ifclts(0.0f/0.0f, 5.0f, 7, 5) != 7 +# || 
ifclts(0.0f/0.0f, 0.0f/0.0f, 7, 5) != 7 +# || ifcles(5.0f, 6.0f, 7, 5) != 5 +# || ifcles(5.0f, 5.0f, 7, 5) != 5 +# || ifcles(5.0f, 4.0f, 7, 5) != 7 +# || ifcles(5.0f, 0.0f/0.0f, 7, 5) != 7 +# || ifcles(0.0f/0.0f, 5.0f, 7, 5) != 7 +# || ifcles(0.0f/0.0f, 0.0f/0.0f, 7, 5) != 7 +# || ifcgts(5.0f, 6.0f, 7, 5) != 7 +# || ifcgts(5.0f, 5.0f, 7, 5) != 7 +# || ifcgts(5.0f, 4.0f, 7, 5) != 5 +# || ifcgts(5.0f, 0.0f/0.0f, 7, 5) != 7 +# || ifcgts(0.0f/0.0f, 5.0f, 7, 5) != 7 +# || ifcgts(0.0f/0.0f, 0.0f/0.0f, 7, 5) != 7 +# || ifcges(5.0f, 6.0f, 7, 5) != 7 +# || ifcges(5.0f, 5.0f, 7, 5) != 5 +# || ifcges(5.0f, 4.0f, 7, 5) != 5 +# || ifcges(5.0f, 0.0f/0.0f, 7, 5) != 7 +# || ifcges(0.0f/0.0f, 5.0f, 7, 5) != 7 +# || ifcges(0.0f/0.0f, 0.0f/0.0f, 7, 5) != 7 +# || ifceqs(5.0f, 6.0f, 7, 5) != 7 +# || ifceqs(5.0f, 5.0f, 7, 5) != 5 +# || ifceqs(5.0f, 4.0f, 7, 5) != 7 +# || ifceqs(5.0f, 0.0f/0.0f, 7, 5) != 7 +# || ifceqs(0.0f/0.0f, 5.0f, 7, 5) != 7 +# || ifceqs(0.0f/0.0f, 0.0f/0.0f, 7, 5) != 7 +# || ifcnes(5.0f, 6.0f, 7, 5) != 5 +# || ifcnes(5.0f, 5.0f, 7, 5) != 7 +# || ifcnes(5.0f, 4.0f, 7, 5) != 5 +# || ifcnes(5.0f, 0.0f/0.0f, 7, 5) != 5 +# || ifcnes(0.0f/0.0f, 5.0f, 7, 5) != 5 +# || ifcnes(0.0f/0.0f, 0.0f/0.0f, 7, 5) != 5 +# || ifcos(5.0f, 6.0f, 7, 5) != 5 +# || ifcos(5.0f, 5.0f, 7, 5) != 5 +# || ifcos(5.0f, 4.0f, 7, 5) != 5 +# || ifcos(5.0f, 0.0f/0.0f, 7, 5) != 7 +# || ifcos(0.0f/0.0f, 5.0f, 7, 5) != 7 +# || ifcos(0.0f/0.0f, 0.0f/0.0f, 7, 5) != 7 +# || ifcuos(5.0f, 6.0f, 7, 5) != 7 +# || ifcuos(5.0f, 5.0f, 7, 5) != 7 +# || ifcuos(5.0f, 4.0f, 7, 5) != 7 +# || ifcuos(5.0f, 0.0f/0.0f, 7, 5) != 5 +# || ifcuos(0.0f/0.0f, 5.0f, 7, 5) != 5 +# || ifcuos(0.0f/0.0f, 0.0f/0.0f, 7, 5) != 5 +# ; +# } +# <<< diff --git a/src/qbe/test/isel1.ssa b/src/qbe/test/isel1.ssa new file mode 100644 index 00000000..879a8718 --- /dev/null +++ b/src/qbe/test/isel1.ssa @@ -0,0 +1,24 @@ +# tests that the address matcher is not +# confused by the two multiplications + +# note: the code handling apple asm fixes +# 
ruins the good work of the matcher here, +# I should revisit these fixes + +export function w $f(l %i, l %j) { +@start + %off1 =l mul %i, 8 + %a_i =l add $a, %off1 + %off2 =l mul %j, 4 + %a_ij =l add %a_i, %off2 + %x =w loadsw %a_ij + ret %x +} + +# >>> driver +# int a[] = {1, 2, 3, 4}; +# extern int f(long long, long long); +# int main() { +# return !(f(0, 0) == 1 && f(0, 1) == 2 && f(1, 0) == 3 && f(1, 1) == 4); +# } +# <<< diff --git a/src/qbe/test/isel2.ssa b/src/qbe/test/isel2.ssa new file mode 100644 index 00000000..10864958 --- /dev/null +++ b/src/qbe/test/isel2.ssa @@ -0,0 +1,122 @@ +# tests that NaN is handled properly by +# floating point comparisons + +export function w $lt(d %x, d %y) { +@start + %r =w cltd %x, %y + ret %r +} + +export function w $le(d %x, d %y) { +@start + %r =w cled %x, %y + ret %r +} + +export function w $gt(d %x, d %y) { +@start + %r =w cgtd %x, %y + ret %r +} + +export function w $ge(d %x, d %y) { +@start + %r =w cged %x, %y + ret %r +} + +export function w $eq1(d %x, d %y) { +@start + %r =w ceqd %x, %y + ret %r +} + +export function w $eq2(d %x, d %y) { +@start + %r =w ceqd %x, %y + jnz %r, @true, @false +@true + ret 1 +@false + ret 0 +} + +export function w $eq3(d %x, d %y) { +@start + %r =w ceqd %x, %y + jnz %r, @true, @false +@true + ret %r +@false + ret 0 +} + +export function w $ne1(d %x, d %y) { +@start + %r =w cned %x, %y + ret %r +} + +export function w $ne2(d %x, d %y) { +@start + %r =w cned %x, %y + jnz %r, @true, @false +@true + ret 1 +@false + ret 0 +} + +export function w $ne3(d %x, d %y) { +@start + %r =w cned %x, %y + jnz %r, @true, @false +@true + ret %r +@false + ret 0 +} + +export function w $o(d %x, d %y) { +@start + %r =w cod %x, %y + ret %r +} + +export function w $uo(d %x, d %y) { +@start + %r =w cuod %x, %y + ret %r +} + +# >>> driver +# #include +# extern int lt(double, double); +# extern int le(double, double); +# extern int gt(double, double); +# extern int ge(double, double); +# extern int eq1(double, 
double); +# extern int eq2(double, double); +# extern int eq3(double, double); +# extern int ne1(double, double); +# extern int ne2(double, double); +# extern int ne3(double, double); +# extern int o(double, double); +# extern int uo(double, double); +# int main(void) { +# /* LessThan Equal GreaterThan Unordered */ +# return !lt(0, 1) + lt(0, 0) + lt(1, 0) + lt(NAN, NAN) +# + !le(0, 1) + !le(0, 0) + le(1, 0) + le(NAN, NAN) +# + gt(0, 1) + gt(0, 0) + !gt(1, 0) + gt(NAN, NAN) +# + ge(0, 1) + !ge(0, 0) + !ge(1, 0) + ge(NAN, NAN) +# + eq1(0, 1) + !eq1(0, 0) + eq1(1, 0) + eq1(NAN, NAN) +# + eq2(0, 1) + !eq2(0, 0) + eq2(1, 0) + eq2(NAN, NAN) +# + eq3(0, 1) + !eq3(0, 0) + eq3(1, 0) + eq3(NAN, NAN) +# + !ne1(0, 1) + ne1(0, 0) + !ne1(1, 0) + !ne1(NAN, NAN) +# + !ne2(0, 1) + ne2(0, 0) + !ne2(1, 0) + !ne2(NAN, NAN) +# + !ne3(0, 1) + ne3(0, 0) + !ne3(1, 0) + !ne3(NAN, NAN) +# + !o(0, 1) + !o(0, 0) + !o(1, 0) + o(NAN, NAN) +# + uo(0, 1) + uo(0, 0) + uo(1, 0) + !uo(NAN, NAN) +# ; +# } +# <<< diff --git a/src/qbe/test/isel3.ssa b/src/qbe/test/isel3.ssa new file mode 100644 index 00000000..5e8862ac --- /dev/null +++ b/src/qbe/test/isel3.ssa @@ -0,0 +1,87 @@ +export function w $slt(w %x, w %y) { +@start + %r =w csltw %x, %y + ret %r +} + +export function w $sle(w %x, w %y) { +@start + %r =w cslew %x, %y + ret %r +} + +export function w $sgt(w %x, w %y) { +@start + %r =w csgtw %x, %y + ret %r +} + +export function w $sge(w %x, w %y) { +@start + %r =w csgew %x, %y + ret %r +} + +export function w $ult(w %x, w %y) { +@start + %r =w cultw %x, %y + ret %r +} + +export function w $ule(w %x, w %y) { +@start + %r =w culew %x, %y + ret %r +} + +export function w $ugt(w %x, w %y) { +@start + %r =w cugtw %x, %y + ret %r +} + +export function w $uge(w %x, w %y) { +@start + %r =w cugew %x, %y + ret %r +} + +export function w $eq(w %x, w %y) { +@start + %r =w ceqw %x, %y + ret %r +} + +export function w $ne(w %x, w %y) { +@start + %r =w cnew %x, %y + ret %r +} + +# >>> driver +# #include +# 
extern int slt(int, int); +# extern int sle(int, int); +# extern int sgt(int, int); +# extern int sge(int, int); +# extern int ult(unsigned, unsigned); +# extern int ule(unsigned, unsigned); +# extern int ugt(unsigned, unsigned); +# extern int uge(unsigned, unsigned); +# extern int eq(unsigned, unsigned); +# extern int ne(unsigned, unsigned); +# int main(void) { +# /* LessThan Equal GreaterThan */ +# return !slt(-1, 0) + slt(0, 0) + slt(0, -1) +# + !sle(-1, 0) + !sle(0, 0) + sle(0, -1) +# + sgt(-1, 0) + sgt(0, 0) + !sgt(0, -1) +# + sge(-1, 0) + !sge(0, 0) + !sge(0, -1) +# + !ult(0, -1) + ult(0, 0) + ult(-1, 0) +# + !ule(0, -1) + !ule(0, 0) + ule(-1, 0) +# + ugt(0, -1) + ugt(0, 0) + !ugt(-1, 0) +# + uge(0, -1) + !uge(0, 0) + !uge(-1, 0) +# + eq(0, 1) + !eq(0, 0) + eq(1, 0) +# + !ne(0, 1) + ne(0, 0) + !ne(1, 0) +# ; +# } +# <<< diff --git a/src/qbe/test/isel4.ssa b/src/qbe/test/isel4.ssa new file mode 100644 index 00000000..874807ee --- /dev/null +++ b/src/qbe/test/isel4.ssa @@ -0,0 +1,64 @@ +# amd64 address-folding stress + +export function w $f0(l %a, l %b) { +@start + %c =l add %b, 2 + %d =l mul %c, 4 + %e =l add %a, %d + %q =l loadw %e + ret %q +} + +export function w $f1(l %a, l %b) { +@start + %c =l add 1, %b + %f =l add %c, 1 + %d =l mul %f, 4 + %e =l add %d, %a + %q =l loadw %e + ret %q +} + +export function w $f2(l %a, l %b) { +@start + %l =l mul %b, 4 + %d =l add 8, %l + %e =l add %a, %d + %q =l loadw %e + ret %q +} + +# fixme: folding is not good here +export function w $f3(l %a, l %b) { +@start + %l =l mul %b, 4 + %d =l add 4, %l + %f =l add 4, %d + %e =l add %a, %f + %q =l loadw %e + ret %q +} + +export function w $f4(l %a, l %b) { +@start + %c =l add 1, %b + %d =l mul %c, 4 + %e =l add 4, %d + %f =l add %e, %a + %q =l loadw %f + ret %q +} + +# >>> driver +# int a[] = {1, 2, 3, 4}; +# typedef int loadf(int *, long long); +# extern loadf f0, f1, f2, f3, f4; +# loadf *fns[] = {&f0, &f1, &f2, &f3, &f4, 0}; +# int main() { +# loadf **f; +# int n; +# for 
(n=1,f=fns; *f; f++,n++) +# if ((*f)(a, 1) != 4) return n; +# return 0; +# } +# <<< diff --git a/src/qbe/test/isel5.ssa b/src/qbe/test/isel5.ssa new file mode 100644 index 00000000..9c546d71 --- /dev/null +++ b/src/qbe/test/isel5.ssa @@ -0,0 +1,16 @@ +# make sure the local symbols used for +# fp constants do not get a _ prefix +# on apple arm hardware + +export function w $main() { +@start + %r =d copy d_1.2 + %x =w call $printf(l $fmt, ..., d %r) + ret 0 +} + +data $fmt = { b "%.06f\n", b 0 } + +# >>> output +# 1.200000 +# <<< diff --git a/src/qbe/test/isel6.ssa b/src/qbe/test/isel6.ssa new file mode 100644 index 00000000..18465715 --- /dev/null +++ b/src/qbe/test/isel6.ssa @@ -0,0 +1,38 @@ +# make sure large consts are lowered +# without an offset +# i.e. not movq $9223372036854775807, 64(%rax) + +export function w $main() { +@_0 + %_1 =w call $myfunc(l 1, l 2, l 3, l 4, l 5, l 6, l 7, l 8, l 9223372036854775807) + ret 0 +} + +# >>> driver +# #include +# #include +# #include +# void myfunc(int64_t a, int64_t b, int64_t c, int64_t d, int64_t e, int64_t f, int64_t g, int64_t h, int64_t i) { +# printf("%" PRId64 "\n", a); +# printf("%" PRId64 "\n", b); +# printf("%" PRId64 "\n", c); +# printf("%" PRId64 "\n", d); +# printf("%" PRId64 "\n", e); +# printf("%" PRId64 "\n", f); +# printf("%" PRId64 "\n", g); +# printf("%" PRId64 "\n", h); +# printf("%" PRId64 "\n", i); +# } +# <<< + +# >>> output +# 1 +# 2 +# 3 +# 4 +# 5 +# 6 +# 7 +# 8 +# 9223372036854775807 +# <<< diff --git a/src/qbe/test/ldbits.ssa b/src/qbe/test/ldbits.ssa new file mode 100644 index 00000000..5e544873 --- /dev/null +++ b/src/qbe/test/ldbits.ssa @@ -0,0 +1,40 @@ +# unit tests for load elimination + +export +function $tests() { +@start + %p =l alloc8 16 + %p3 =l add %p, 3 + %p4 =l add %p, 4 + %p6 =l add %p, 6 + %p8 =l add %p, 8 +@test1 + storew 1, $a + storel 1311768467139281697, %p + storeh 255, %p8 + %x1 =w load %p6 + %c1 =w cnew %x1, 16716340 + jnz %c1, @fail, @test2 +@test2 + storew 2, $a + %x2 =w 
loadub %p3 + %c2 =w cnew %x2, 135 + jnz %c2, @fail, @test3 +@test3 + storew 3, $a + storew 2864434397, %p8 + %x3 =l load %p3 + %c3 =w cnel %x3, -4914310023110821753 + jnz %c3, @fail, @test4 +@test4 +@ok + storew 0, $a +@fail + ret +} + +# >>> driver +# extern void tests(void); +# int a; +# int main() { tests(); return a; } +# <<< diff --git a/src/qbe/test/ldhoist.ssa b/src/qbe/test/ldhoist.ssa new file mode 100644 index 00000000..d4b1b64b --- /dev/null +++ b/src/qbe/test/ldhoist.ssa @@ -0,0 +1,21 @@ +# loads must not be unsafely hoisted + +export +function w $f(w %n, l %p) { +@start + %r =w copy 0 +@loop + %n =w sub %n, 1 + %c =w csgew %n, 0 + jnz %c, @loop1, @end +@loop1 + %r =w loadw %p + jmp @loop +@end + ret %r +} + +# >>> driver +# extern int f(int, int *); +# int main() { return f(0, 0); } +# <<< diff --git a/src/qbe/test/load1.ssa b/src/qbe/test/load1.ssa new file mode 100644 index 00000000..a87fd2d8 --- /dev/null +++ b/src/qbe/test/load1.ssa @@ -0,0 +1,27 @@ +# checks that phi arguments are correctly +# handled in alias analysis + +export +function w $f(w %cond) { +@start + %x =l alloc4 4 + %y =l alloc4 4 + storew 0, %x + jnz %cond, @true, @false +@true + jmp @end +@false + jmp @end +@end + %ptr =l phi @true %x, @false %y + storew 1, %ptr + %result =w loadsw %x + ret %result +} + +# >>> driver +# extern int f(int); +# int main() { +# return !(f(0) == 0 && f(1) == 1); +# } +# <<< diff --git a/src/qbe/test/load2.ssa b/src/qbe/test/load2.ssa new file mode 100644 index 00000000..05c12a66 --- /dev/null +++ b/src/qbe/test/load2.ssa @@ -0,0 +1,75 @@ +# blit & load elimination + +export +function $f() { +@start + %x =l alloc4 12 + %y =l alloc4 12 + + %x1 =l add 1, %x + %x2 =l add 1, %x1 + %x3 =l add 1, %x2 + %x4 =l add 1, %x3 + %x5 =l add 1, %x4 + %x6 =l add 1, %x5 + %x7 =l add 1, %x6 + %x8 =l add 1, %x7 + %x9 =l add 1, %x8 + %xa =l add 1, %x9 + %xb =l add 1, %xa + + %y1 =l add 1, %y + %y4 =l add 4, %y + + storew 287454020, %x4 # 0x11223344 + storew 1432778632, %y 
# 0x55667788 + blit %y, %x5, 1 + %n =w load %x4 + call $px(w %n) # 0x11228844 + + storew 287454020, %x4 # 0x11223344 + storew 1432778632, %y # 0x55667788 + blit %y, %x5, 2 + %n =w load %x4 + call $px(w %n) # 0x11778844 + + storew 287454020, %x4 # 0x11223344 + storew 1432778632, %y # 0x55667788 + blit %y, %x5, 4 + %n =w load %x4 + call $px(w %n) # 0x66778844 + + storew 287454020, %x4 # 0x11223344 + storew 1432778632, %y # 0x55667788 + blit %y, %x2, 4 + %n =w load %x4 + call $px(w %n) # 0x11225566 + + storew 287454020, %x4 # 0x11223344 + storew 0, %y + storew 1432778632, %y4 # 0x55667788 + blit %y1, %x2, 7 + %n =w load %x4 + call $px(w %n) # 0x66778800 + + ret +} + +# >>> driver +# #include +# void px(unsigned n) { +# printf("0x%08x\n", n); +# } +# int main() { +# extern void f(void); +# f(); +# } +# <<< + +# >>> output +# 0x11228844 +# 0x11778844 +# 0x66778844 +# 0x11225566 +# 0x66778800 +# <<< diff --git a/src/qbe/test/load3.ssa b/src/qbe/test/load3.ssa new file mode 100644 index 00000000..73d60aa1 --- /dev/null +++ b/src/qbe/test/load3.ssa @@ -0,0 +1,50 @@ +# regression test for load() +# see comment below + +function w $rand() { +@start + ret 0 +} + +function w $chk(w %a, w %b) { +@start + %ok =w ceqw %a, 1 + %ok1 =w ceqw %b, 0 + %ok2 =w and %ok, %ok1 + %ret =w xor %ok2, 1 + ret %ret +} + +export +function w $main() { +@start + %s0 =l alloc4 8 + %s1 =l alloc4 8 + + storew 1, %s0 + %s04 =l add 4, %s0 + storew 0, %s04 + + %rnd =w call $rand() + jnz %rnd, @tt, @ff +@tt + jmp @blit +@ff + jmp @blit + +@blit + # we make sure def() checks + # offsets correctly when + # processing inserted phis; + # if not, %w1 will bogusly + # have the same value as %w0 + + blit %s0, %s1, 8 + + %w0 =w load %s1 + %s14 =l add 4, %s1 + %w1 =w load %s14 + + %ret =w call $chk(w %w0, w %w1) + ret %ret +} diff --git a/src/qbe/test/loop.ssa b/src/qbe/test/loop.ssa new file mode 100644 index 00000000..98914d91 --- /dev/null +++ b/src/qbe/test/loop.ssa @@ -0,0 +1,24 @@ +# simple looping program 
+# sums all integers from 100 to 0 + +export +function $test() { +@start + +@loop + %s =w phi @start 0, @loop %s1 + %n =w phi @start 100, @loop %n1 + %s1 =w add %s, %n + %n1 =w sub %n, 1 + jnz %n1, @loop, @end + +@end + storew %s1, $a + ret +} + +# >>> driver +# extern void test(void); +# int a; +# int main() { test(); return !(a == 5050); } +# <<< diff --git a/src/qbe/test/mandel.ssa b/src/qbe/test/mandel.ssa new file mode 100644 index 00000000..67d960ab --- /dev/null +++ b/src/qbe/test/mandel.ssa @@ -0,0 +1,124 @@ +# Print the Mandelbrot set on the +# terminal line output. + +function w $mandel(d %x, d %y) { +@mandel + %cr =d sub %y, d_0.5 + %ci =d copy %x +@loop + %i =w phi @mandel 0, @loop1 %i1 + %zr =d phi @mandel d_0, @loop1 %zr1 + %zi =d phi @mandel d_0, @loop1 %zi1 + %i1 =w add 1, %i + %tmp =d mul %zr, %zi + %zr2 =d mul %zr, %zr + %zi2 =d mul %zi, %zi + %zrx =d sub %zr2, %zi2 + %zr1 =d add %zrx, %cr + %zix =d add %tmp, %tmp + %zi1 =d add %zix, %ci + %sum =d add %zi2, %zr2 + %cmp1 =w cgtd %sum, d_16 + jnz %cmp1, @reti, @loop1 +@loop1 + %cmp2 =w csgtw %i1, 1000 + jnz %cmp2, @ret0, @loop +@reti + ret %i1 +@ret0 + ret 0 +} + +export +function w $main() { +@main +@loopy + %y =d phi @main d_-1, @loopy1 %y1 +@loopx + %x =d phi @loopy d_-1, @loopx1 %x1 + %i =w call $mandel(d %x, d %y) + jnz %i, @out, @in +@in + %r0 =w call $putchar(w 42) # '*' + jmp @loopx1 +@out + %r1 =w call $putchar(w 32) # ' ' + jmp @loopx1 +@loopx1 + %x1 =d add %x, d_0.032 + %cmp1 =w cgtd %x1, d_1 + jnz %cmp1, @loopy1, @loopx +@loopy1 + %r2 =w call $putchar(w 10) # '\n' + %y1 =d add %y, d_0.032 + %cmp2 =w cgtd %y1, d_1 + jnz %cmp2, @ret, @loopy +@ret + ret 0 +} + +# >>> output +# # +# # +# # +# # +# * # +# **** # +# **** # +# *** # +# ***** # +# ********* # +# ************ # +# ***************** # +# **************** # +# *************** # +# **************** # +# **************** # +# ***************** # +# **************** # +# **************** # +# ************** # +# ************* # +# 
************ # +# ********* # +# ***** # +# *********** # +# ***************** # +# ********************** # +# * *********************** ** # +# *************************** # +# ***************************** # +# * ******************************* ** # +# ** *********************************** # +# *********************************** * # +# *********************************** # +# ************************************* # +# ************************************* # +# *************************************** # +# *************************************** # +# *************************************** # +# **************************************** # +# * **************************************** # +# ********************************************** **** # +# **************************************************** # +# * ***************************************************** # +# * ***************************************************** # +# ***** **************************************** **** # +# * **************************************** * # +# **************************************** # +# *************************************** # +# **************************************** # +# *************************************** # +# **************************************** # +# ************************************ # +# *********************************** # +# ********************************* # +# ************************************ # +# *** ************* ************** *** # +# *********** ************ ** # +# ******** ******** # +# ** * * # +# # +# # +# # +# <<< diff --git a/src/qbe/test/max.ssa b/src/qbe/test/max.ssa new file mode 100644 index 00000000..27fa8ca3 --- /dev/null +++ b/src/qbe/test/max.ssa @@ -0,0 +1,34 @@ +# find the maximum value +# in a nul-terminated array +# of unsigned bytes +# +# the output is stored in $a + +data $arr = { b 10, b -60, b 10, b 100, b 200, b 0 } + +export +function $test() { +@start +@loop + %max =w phi @start -1, @new %byt, @old %max + %loc =l phi 
@start $arr, @new %loc1, @old %loc1 + %byt =w loadub %loc + %loc1 =l add 1, %loc + jnz %byt, @iter, @end +@iter + %cmp =w cslew %max, %byt + jnz %cmp, @new, @old +@new + jmp @loop +@old + jmp @loop +@end + storew %max, $a + ret +} + +# >>> driver +# extern void test(void); +# int a; +# int main() { test(); return !(a == 200); } +# <<< diff --git a/src/qbe/test/mem1.ssa b/src/qbe/test/mem1.ssa new file mode 100644 index 00000000..b7045a62 --- /dev/null +++ b/src/qbe/test/mem1.ssa @@ -0,0 +1,35 @@ +type :i3 = { w 3 } + +export +function :i3 $blit() { +@start + %l0 =l alloc4 12 + %l1 =l alloc4 12 + + storew 287454020, %l0 + %l04 =l add %l0, 4 + storew 1432778632, %l04 + %l08 =l add %l0, 8 + storew 2578103244, %l08 + + # we expect that %l0 and %l1 + # are coalesced and the blit + # goes backwards + %l11 =l add %l1, 1 + blit %l0, %l11, 11 + + storeb 221, %l1 + + ret %l1 +} + +# >>> driver +# struct i3 { int a, b, c; }; +# extern struct i3 blit(); +# int main() { +# struct i3 s = blit(); +# return !(s.a == 0x223344dd +# && s.b == 0x66778811 +# && s.c == 0xaabbcc55); +# } +# <<< diff --git a/src/qbe/test/mem2.ssa b/src/qbe/test/mem2.ssa new file mode 100644 index 00000000..4a136b49 --- /dev/null +++ b/src/qbe/test/mem2.ssa @@ -0,0 +1,32 @@ +# Ember Sawady reported this bug +# in stack-slot coalescing + +type :t = { w 2 } + +function :t $func() { +@start.0 + %temp =l alloc4 4 + %ret =l alloc4 8 + storew 1, %temp + # storew can also go here + %field =l add %ret, 4 + storew 2, %ret + blit %temp, %field, 4 + # removing either of these storews causes it to work + storew 2, %ret + ret %ret +} + +export function w $main() { +@start + %ret =:t call $func() + %fptr =l add %ret, 4 + %field =w loaduw %fptr + %x =w ceqw %field, 1 + jnz %x, @passed, @failed +@failed + # this fails despite 1 => temp => ret + 4 => field + call $abort() +@passed + ret 0 +} diff --git a/src/qbe/test/mem3.ssa b/src/qbe/test/mem3.ssa new file mode 100644 index 00000000..a6cafd2e --- /dev/null +++ 
b/src/qbe/test/mem3.ssa @@ -0,0 +1,48 @@ +# Ember Sawady reported this bug +# in stack-slot coalescing + +type :type.3 = align 8 { l 1, l 1 } +type :tags.2 = { { :type.3 1 } } +type :type.1 = align 8 { w 1, :tags.2 1 } +type :tags.9 = { { w 1 } } +type :type.8 = align 4 { w 1, :tags.9 1 } + +function :type.1 $func() { +@start.0 + %object.5 =l alloc8 24 + %object.7 =l alloc4 8 + %binding.21 =l alloc8 16 + %object.23 =l alloc8 24 +@body.4 + %.10 =l add %object.7, 4 + jnz 1, @matches.13, @next.14 +@matches.13 + # binding.21 gets fused with object.23 + storel 1, %binding.21 + %value.22 =l add %binding.21, 8 + storel 2, %value.22 + %.24 =l add %object.23, 8 + # but the blit direction is not set correctly + blit %binding.21, %.24, 16 + ret %object.23 +@next.14 + storew 2543892678, %object.5 + ret %object.5 +} + +export function w $main() { +@start.27 + %object.43 =l alloc8 24 + %object.49 =l alloc8 24 +@body.28 + %returns.34 =:type.1 call $func() + %value.47 =l add %returns.34, 16 + %load.48 =l loadl %value.47 + %.33 =w ceql %load.48, 2 + jnz %.33, @passed.32, @failed.31 +@failed.31 + call $abort() +@passed.32 + ret 0 +} + diff --git a/src/qbe/test/philv.ssa b/src/qbe/test/philv.ssa new file mode 100644 index 00000000..f14106fb --- /dev/null +++ b/src/qbe/test/philv.ssa @@ -0,0 +1,34 @@ +# regression test for 1f4ff634 + +# warning! headaches can occur +# when trying to figure out what +# the test is doing! 
+ +export +function w $t0() { +@start +@loop + %x0 =w phi @start 256, @loop %y0 + %y0 =w phi @start 128, @loop %y1 + %y1 =w shr %x0, 1 + jnz %y1, @loop, @end +@end + ret %x0 +} + +export +function w $t1() { # swapped phis +@start +@loop + %y0 =w phi @start 128, @loop %y1 + %x0 =w phi @start 256, @loop %y0 + %y1 =w shr %x0, 1 + jnz %y1, @loop, @end +@end + ret %x0 +} + +# >>> driver +# extern int t0(void), t1(void); +# int main() { return !(t0() == 1 && t1() == 1);} +# <<< diff --git a/src/qbe/test/prime.ssa b/src/qbe/test/prime.ssa new file mode 100644 index 00000000..2273e1d3 --- /dev/null +++ b/src/qbe/test/prime.ssa @@ -0,0 +1,33 @@ +# find the 10,001st prime +# store it in a + +export +function $test() { +@start +@loop + %n =w phi @start 5, @tloop %n, @yes %n1 + %p =w phi @start 13, @tloop %p1, @yes %p1 + %p1 =w add %p, 2 +@tloop + %t =w phi @loop 3, @next %t1 + %r =w rem %p, %t + jnz %r, @next, @loop +@next + %t1 =w add 2, %t + %tsq =w mul %t1, %t1 + %c0 =w csgtw %tsq, %p + jnz %c0, @yes, @tloop +@yes + %n1 =w add 1, %n + %c1 =w ceqw 10001, %n1 + jnz %c1, @end, @loop +@end + storew %p, $a + ret +} + +# >>> driver +# extern void test(void); +# int a; +# int main() { test(); return !(a == 104743); } +# <<< diff --git a/src/qbe/test/puts10.ssa b/src/qbe/test/puts10.ssa new file mode 100644 index 00000000..8c6ed5e6 --- /dev/null +++ b/src/qbe/test/puts10.ssa @@ -0,0 +1,30 @@ +export +function $main() { +@start + %y =l alloc4 4 + %y1 =l add %y, 1 + storeb 0, %y1 +@loop + %n =w phi @start 0, @loop %n1 + %c =w add %n, 48 + storeb %c, %y + %r =w call $puts(l %y) + %n1 =w add %n, 1 + %cmp =w cslew %n1, 9 + jnz %cmp, @loop, @end +@end + ret +} + +# >>> output +# 0 +# 1 +# 2 +# 3 +# 4 +# 5 +# 6 +# 7 +# 8 +# 9 +# <<< diff --git a/src/qbe/test/queen.ssa b/src/qbe/test/queen.ssa new file mode 100644 index 00000000..141e17e6 --- /dev/null +++ b/src/qbe/test/queen.ssa @@ -0,0 +1,282 @@ +# eight queens program +# generated by minic + +export function w $chk(w %t0, w %t1) { 
+@l0 + %x =l alloc4 4 + storew %t0, %x + %y =l alloc4 4 + storew %t1, %y + %i =l alloc4 4 + %r =l alloc4 4 + storew 0, %i + storew 0, %r +@l1 + %t6 =w loadw %i + %t7 =w loadw $glo1 + %t5 =w csltw %t6, %t7 + jnz %t5, @l2, @l3 +@l2 + %t10 =w loadw %r + %t15 =l loadl $glo3 + %t16 =w loadw %x + %t17 =l extsw %t16 + %t18 =l mul 8, %t17 + %t14 =l add %t15, %t18 + %t13 =l loadl %t14 + %t19 =w loadw %i + %t20 =l extsw %t19 + %t21 =l mul 4, %t20 + %t12 =l add %t13, %t21 + %t11 =w loadw %t12 + %t9 =w add %t10, %t11 + storew %t9, %r + %t24 =w loadw %r + %t29 =l loadl $glo3 + %t30 =w loadw %i + %t31 =l extsw %t30 + %t32 =l mul 8, %t31 + %t28 =l add %t29, %t32 + %t27 =l loadl %t28 + %t33 =w loadw %y + %t34 =l extsw %t33 + %t35 =l mul 4, %t34 + %t26 =l add %t27, %t35 + %t25 =w loadw %t26 + %t23 =w add %t24, %t25 + storew %t23, %r + %t39 =w loadw %x + %t40 =w loadw %i + %t38 =w add %t39, %t40 + %t41 =w loadw $glo1 + %t37 =w csltw %t38, %t41 + %t44 =w loadw %y + %t45 =w loadw %i + %t43 =w add %t44, %t45 + %t46 =w loadw $glo1 + %t42 =w csltw %t43, %t46 + %t36 =w and %t37, %t42 + jnz %t36, @l4, @l5 +@l4 + %t49 =w loadw %r + %t54 =l loadl $glo3 + %t56 =w loadw %x + %t57 =w loadw %i + %t55 =w add %t56, %t57 + %t58 =l extsw %t55 + %t59 =l mul 8, %t58 + %t53 =l add %t54, %t59 + %t52 =l loadl %t53 + %t61 =w loadw %y + %t62 =w loadw %i + %t60 =w add %t61, %t62 + %t63 =l extsw %t60 + %t64 =l mul 4, %t63 + %t51 =l add %t52, %t64 + %t50 =w loadw %t51 + %t48 =w add %t49, %t50 + storew %t48, %r +@l5 + %t68 =w loadw %x + %t69 =w loadw %i + %t67 =w add %t68, %t69 + %t70 =w loadw $glo1 + %t66 =w csltw %t67, %t70 + %t74 =w loadw %y + %t75 =w loadw %i + %t73 =w sub %t74, %t75 + %t71 =w cslew 0, %t73 + %t65 =w and %t66, %t71 + jnz %t65, @l7, @l8 +@l7 + %t78 =w loadw %r + %t83 =l loadl $glo3 + %t85 =w loadw %x + %t86 =w loadw %i + %t84 =w add %t85, %t86 + %t87 =l extsw %t84 + %t88 =l mul 8, %t87 + %t82 =l add %t83, %t88 + %t81 =l loadl %t82 + %t90 =w loadw %y + %t91 =w loadw %i + %t89 =w sub %t90, 
%t91 + %t92 =l extsw %t89 + %t93 =l mul 4, %t92 + %t80 =l add %t81, %t93 + %t79 =w loadw %t80 + %t77 =w add %t78, %t79 + storew %t77, %r +@l8 + %t98 =w loadw %x + %t99 =w loadw %i + %t97 =w sub %t98, %t99 + %t95 =w cslew 0, %t97 + %t102 =w loadw %y + %t103 =w loadw %i + %t101 =w add %t102, %t103 + %t104 =w loadw $glo1 + %t100 =w csltw %t101, %t104 + %t94 =w and %t95, %t100 + jnz %t94, @l10, @l11 +@l10 + %t107 =w loadw %r + %t112 =l loadl $glo3 + %t114 =w loadw %x + %t115 =w loadw %i + %t113 =w sub %t114, %t115 + %t116 =l extsw %t113 + %t117 =l mul 8, %t116 + %t111 =l add %t112, %t117 + %t110 =l loadl %t111 + %t119 =w loadw %y + %t120 =w loadw %i + %t118 =w add %t119, %t120 + %t121 =l extsw %t118 + %t122 =l mul 4, %t121 + %t109 =l add %t110, %t122 + %t108 =w loadw %t109 + %t106 =w add %t107, %t108 + storew %t106, %r +@l11 + %t127 =w loadw %x + %t128 =w loadw %i + %t126 =w sub %t127, %t128 + %t124 =w cslew 0, %t126 + %t132 =w loadw %y + %t133 =w loadw %i + %t131 =w sub %t132, %t133 + %t129 =w cslew 0, %t131 + %t123 =w and %t124, %t129 + jnz %t123, @l13, @l14 +@l13 + %t136 =w loadw %r + %t141 =l loadl $glo3 + %t143 =w loadw %x + %t144 =w loadw %i + %t142 =w sub %t143, %t144 + %t145 =l extsw %t142 + %t146 =l mul 8, %t145 + %t140 =l add %t141, %t146 + %t139 =l loadl %t140 + %t148 =w loadw %y + %t149 =w loadw %i + %t147 =w sub %t148, %t149 + %t150 =l extsw %t147 + %t151 =l mul 4, %t150 + %t138 =l add %t139, %t151 + %t137 =w loadw %t138 + %t135 =w add %t136, %t137 + storew %t135, %r +@l14 + %t153 =w loadw %i + %t152 =w add %t153, 1 + storew %t152, %i + jmp @l1 +@l3 + %t154 =w loadw %r + ret %t154 +} + +export function w $go(w %t0) { +@l16 + %y =l alloc4 4 + storew %t0, %y + %x =l alloc4 4 + %t2 =w loadw %y + %t3 =w loadw $glo1 + %t1 =w ceqw %t2, %t3 + jnz %t1, @l17, @l18 +@l17 + %t5 =w loadw $glo2 + %t4 =w add %t5, 1 + storew %t4, $glo2 + ret 0 +@l18 + storew 0, %x +@l20 + %t10 =w loadw %x + %t11 =w loadw $glo1 + %t9 =w csltw %t10, %t11 + jnz %t9, @l21, @l22 +@l21 + %t14 
=w loadw %x + %t15 =w loadw %y + %t13 =w call $chk(w %t14, w %t15) + %t12 =w ceqw %t13, 0 + jnz %t12, @l23, @l24 +@l23 + %t21 =l loadl $glo3 + %t22 =w loadw %x + %t23 =l extsw %t22 + %t24 =l mul 8, %t23 + %t20 =l add %t21, %t24 + %t19 =l loadl %t20 + %t25 =w loadw %y + %t26 =l extsw %t25 + %t27 =l mul 4, %t26 + %t18 =l add %t19, %t27 + %t28 =w loadw %t18 + %t17 =w add %t28, 1 + storew %t17, %t18 + %t31 =w loadw %y + %t30 =w add %t31, 1 + %t29 =w call $go(w %t30) + %t37 =l loadl $glo3 + %t38 =w loadw %x + %t39 =l extsw %t38 + %t40 =l mul 8, %t39 + %t36 =l add %t37, %t40 + %t35 =l loadl %t36 + %t41 =w loadw %y + %t42 =l extsw %t41 + %t43 =l mul 4, %t42 + %t34 =l add %t35, %t43 + %t44 =w loadw %t34 + %t33 =w sub %t44, 1 + storew %t33, %t34 +@l24 + %t46 =w loadw %x + %t45 =w add %t46, 1 + storew %t45, %x + jmp @l20 +@l22 + ret 0 +} + +export function w $main() { +@l26 + %i =l alloc4 4 + storew 8, $glo1 + %t4 =w loadw $glo1 + %t3 =l call $calloc(w %t4, w 8) + storel %t3, $glo3 + storew 0, %i +@l27 + %t9 =w loadw %i + %t10 =w loadw $glo1 + %t8 =w csltw %t9, %t10 + jnz %t8, @l28, @l29 +@l28 + %t13 =w loadw $glo1 + %t12 =l call $calloc(w %t13, w 4) + %t16 =l loadl $glo3 + %t17 =w loadw %i + %t18 =l extsw %t17 + %t19 =l mul 8, %t18 + %t15 =l add %t16, %t19 + storel %t12, %t15 + %t21 =w loadw %i + %t20 =w add %t21, 1 + storew %t20, %i + jmp @l27 +@l29 + %t22 =w call $go(w 0) + %t25 =w loadw $glo2 + %t24 =w cnew %t25, 92 + ret %t24 +} + +data $glo1 = { w 0 } +data $glo2 = { w 0 } +data $glo3 = { l 0 } diff --git a/src/qbe/test/rega1.ssa b/src/qbe/test/rega1.ssa new file mode 100644 index 00000000..9e87c898 --- /dev/null +++ b/src/qbe/test/rega1.ssa @@ -0,0 +1,24 @@ +# tests that %b and %a0 do not end up in +# the same register at the start of @loop + +export function l $f(l %a) { +@start +@loop + %b =l phi @start 42, @loop0 %a1, @loop1 %a1 + %a0 =l phi @start %a, @loop0 %a1, @loop1 %a1 + %a1 =l sub %a0, 1 + jnz %b, @loop0, @loop1 +@loop0 + jnz %a1, @loop, @end +@loop1 + jnz 
%a1, @loop, @end +@end + ret %b +} + +# >>> driver +# extern long long f(long long); +# int main() { +# return !(f(1) == 42 && f(2) == 1 && f(42) == 1); +# } +# <<< diff --git a/src/qbe/test/spill1.ssa b/src/qbe/test/spill1.ssa new file mode 100644 index 00000000..21e98c2c --- /dev/null +++ b/src/qbe/test/spill1.ssa @@ -0,0 +1,68 @@ +export +function w $f(w %n0) { +@start +@loop + %n1 =w phi @start %n0, @loop %n2 + + %p0 =w phi @start 0, @loop %p01 + %p1 =w phi @start 0, @loop %p11 + %p2 =w phi @start 0, @loop %p21 + %p3 =w phi @start 0, @loop %p31 + %p4 =w phi @start 0, @loop %p41 + %p5 =w phi @start 0, @loop %p51 + %p6 =w phi @start 0, @loop %p61 + %p7 =w phi @start 0, @loop %p71 + %p8 =w phi @start 0, @loop %p81 + %p9 =w phi @start 0, @loop %p91 + %pa =w phi @start 0, @loop %pa1 + %pb =w phi @start 0, @loop %pb1 + %pc =w phi @start 0, @loop %pc1 + %pd =w phi @start 0, @loop %pd1 + %pe =w phi @start 0, @loop %pe1 + %pf =w phi @start 0, @loop %pf1 + + %p01 =w add 1, %p0 + %p11 =w add 2, %p1 + %p21 =w add 3, %p2 + %p31 =w add 4, %p3 + %p41 =w add 5, %p4 + %p51 =w add 6, %p5 + %p61 =w add 7, %p6 + %p71 =w add 8, %p7 + %p81 =w add 9, %p8 + %p91 =w add 10, %p9 + %pa1 =w add 11, %pa + %pb1 =w add 12, %pb + %pc1 =w add 13, %pc + %pd1 =w add 14, %pd + %pe1 =w add 15, %pe + %pf1 =w add 16, %pf + + %n2 =w sub %n1, 1 + jnz %n2, @loop, @end + +@end + %a =w sub 0, 0 + %a =w add %p01, %a + %a =w add %p11, %a + %a =w add %p21, %a + %a =w add %p31, %a + %a =w add %p41, %a + %a =w add %p51, %a + %a =w add %p61, %a + %a =w add %p71, %a + %a =w add %p81, %a + %a =w add %p91, %a + %a =w add %pa1, %a + %a =w add %pb1, %a + %a =w add %pc1, %a + %a =w add %pd1, %a + %a =w add %pe1, %a + %a =w add %pf1, %a + ret %a +} + +# >>> driver +# extern int f(int); +# int main() { return !(f(1) == 136); } +# <<< diff --git a/src/qbe/test/strcmp.ssa b/src/qbe/test/strcmp.ssa new file mode 100644 index 00000000..49568717 --- /dev/null +++ b/src/qbe/test/strcmp.ssa @@ -0,0 +1,63 @@ +# the C strcmp 
function generated by scc + +export function w $strcmp(l %s1.3.val,l %s2.5.val) +{ +@.37 + %s1.3 =l alloc8 8 + %s2.5 =l alloc8 8 + storel %s1.3.val,%s1.3 + storel %s2.5.val,%s2.5 + jmp @.5 +@.6 + %.9 =l loadl %s1.3 + %.10 =l add %.9,1 + storel %.10,%s1.3 + %.11 =l loadl %s2.5 + %.12 =l add %.11,1 + storel %.12,%s2.5 +@.5 + %.15 =l loadl %s1.3 + %.16 =w loadsb %.15 + %.17 =w extsb %.16 + %.18 =w cnew %.17,0 + jnz %.18,@.14,@.8 +@.14 + %.19 =l loadl %s2.5 + %.20 =w loadsb %.19 + %.21 =w extsb %.20 + %.22 =w cnew %.21,0 + jnz %.22,@.13,@.8 +@.13 + %.23 =l loadl %s1.3 + %.24 =w loadsb %.23 + %.25 =w extsb %.24 + %.26 =l loadl %s2.5 + %.27 =w loadsb %.26 + %.28 =w extsb %.27 + %.29 =w ceqw %.25,%.28 + jnz %.29,@.6,@.8 +@.8 +@.7 + %.30 =l loadl %s1.3 + %.31 =w loadub %.30 + %.32 =w extub %.31 + %.33 =l loadl %s2.5 + %.34 =w loadub %.33 + %.35 =w extub %.34 + %.36 =w sub %.32,%.35 + ret %.36 +} + +# >>> driver +# extern int strcmp(const char *, const char *); +# int main() { +# char a[] = "Hello world"; +# return !( +# strcmp(a, a) == 0 && +# strcmp("aaa", "aab") < 0 && +# strcmp("..cnn", "..bbc") > 0 && +# strcmp(a, "Hellp ...") < 0 && +# strcmp(a, "Hello vorld") > 0 +# ); +# } +# <<< diff --git a/src/qbe/test/strspn.ssa b/src/qbe/test/strspn.ssa new file mode 100644 index 00000000..a64ea19c --- /dev/null +++ b/src/qbe/test/strspn.ssa @@ -0,0 +1,77 @@ +# the C strspn function generated by scc + +export function w $strspn_(l %s1.81.val,l %s2.82.val) +{ +@.64 + %s1.81 =l alloc8 8 + %s2.82 =l alloc8 8 + %n.83 =l alloc4 4 + %c.84 =l alloc4 4 + %p.85 =l alloc8 8 + storel %s1.81.val,%s1.81 + storel %s2.82.val,%s2.82 + storew 0,%n.83 + jmp @.27 +@.28 + %.39 =l loadl %s2.82 + storel %.39,%p.85 + jmp @.29 +@.30 +@.31 + %.40 =l loadl %p.85 + %.41 =l add %.40,1 + storel %.41,%p.85 +@.29 + %.43 =l loadl %p.85 + %.44 =w loadsb %.43 + %.45 =w extsb %.44 + %.46 =w cnew %.45,0 + jnz %.46,@.42,@.36 +@.42 + %.47 =l loadl %p.85 + %.48 =w loadsb %.47 + %.49 =w extsb %.48 + %.50 =w loadsw 
%c.84 + %.51 =w cnew %.49,%.50 + jnz %.51,@.30,@.36 +@.36 +@.32 + %.52 =l loadl %p.85 + %.53 =w loadsb %.52 + %.54 =w extsb %.53 + %.55 =w cnew %.54,0 + jnz %.55,@.33,@.37 +@.37 + jmp @.34 +@.33 +@.35 + %.56 =w loaduw %n.83 + %.57 =w add %.56,1 + storew %.57,%n.83 +@.27 + %.58 =l loadl %s1.81 + %.59 =l add %.58,1 + storel %.59,%s1.81 + %.60 =w loadsb %.58 + %.61 =w extsb %.60 + storew %.61,%c.84 + %.62 =w cnew %.61,0 + jnz %.62,@.28,@.38 +@.38 +@.34 + %.63 =w loaduw %n.83 + ret %.63 +} + +# >>> driver +# extern unsigned strspn_(const char *, const char *); +# int main() { +# return !( +# strspn_("", "abc") == 0 && +# strspn_("abc", "") == 0 && +# strspn_("abc", "bac") == 3 && +# strspn_("xabc", "bac") == 0 && +# strspn_("axbc", "bca") == 1 +# ); +# } +# <<< diff --git a/src/qbe/test/sum.ssa b/src/qbe/test/sum.ssa new file mode 100644 index 00000000..08ba8c09 --- /dev/null +++ b/src/qbe/test/sum.ssa @@ -0,0 +1,32 @@ +# Simple test for addressing modes. + +export +function w $sum(l %arr, w %num) { +@start +@loop + %n1 =w phi @start %num, @loop1 %n2 + %s0 =w phi @start 0, @loop1 %s1 + %n2 =w sub %n1, 1 + %c =w cslew %n1, 0 + jnz %c, @end, @loop1 +@loop1 + %idx0 =l extsw %n2 + %idx1 =l mul 4, %idx0 + %idx2 =l add %idx1, %arr + %w =w loadw %idx2 + %s1 =w add %w, %s0 + jmp @loop +@end + ret %s0 +} + +# >>> driver +# extern int sum(int *, int); +# int arr[] = { 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21 }; +# #define N sizeof arr / sizeof arr[0] +# int main() { +# int i, s; +# for (s=i=0; i>> output +# i0==42 +# i1==402653226 +# *(x+0)==1 +# *(x+4)==2 +# *(x+8)==3 +# *(x+12)==4 +# <<< diff --git a/src/qbe/test/vararg1.ssa b/src/qbe/test/vararg1.ssa new file mode 100644 index 00000000..3b33890b --- /dev/null +++ b/src/qbe/test/vararg1.ssa @@ -0,0 +1,35 @@ +export +function d $f(l %x, ...) { +@start + %vp =l alloc8 32 + vastart %vp + %i =l vaarg %vp + %n =d vaarg %vp + ret %n +} + +export +function w $g(l %fmt, ...) 
{ +@start + %vp =l alloc8 32 + vastart %vp + %r =w call $print(l %fmt, l %vp) + ret %r +} + +# >>> driver +# #include +# #include +# extern double f(int, ...); +# extern int g(char *, ...); +# int print(const char *fmt, va_list *ap) { +# return vprintf(fmt, *ap); +# } +# int main() { +# g("Hell%c %s %g!\n", 'o', "world", f(42, "x", 42.0)); +# } +# <<< + +# >>> output +# Hello world 42! +# <<< diff --git a/src/qbe/test/vararg2.ssa b/src/qbe/test/vararg2.ssa new file mode 100644 index 00000000..cecb20a5 --- /dev/null +++ b/src/qbe/test/vararg2.ssa @@ -0,0 +1,684 @@ +export function $qbeprint0(l %fmt, ...) { +@start + %fmtdbl =l alloc4 4 + %fmtint =l alloc4 4 + %emptys =l alloc4 4 + storew 2122789, %fmtint + storew 2123557, %fmtdbl + storew 0, %emptys + %vp =l alloc8 32 + %fmt1 =l add 1, %fmt + vastart %vp +@loop + %p =l phi @start %fmt1, @casef %p1, @cased %p1 + %c =w loadsb %p + %p1 =l add 3, %p + jnz %c, @loop1, @end +@loop1 + %isg =w ceqw %c, 103 + jnz %isg, @casef, @cased +@casef + %dbl =d vaarg %vp + %r =w call $printf(l %fmtdbl, ..., d %dbl) + jmp @loop +@cased + %int =w vaarg %vp + %r =w call $printf(l %fmtint, ..., w %int) + jmp @loop +@end + %r =w call $puts(l %emptys) + ret +} + +export function $qbecall0(l %fmt, ...) { +@start + %vp =l alloc8 32 + vastart %vp + %r =w call $print(l %fmt, l %vp) + ret +} + +export function $qbeprint1(w %argw0, l %fmt, ...) 
{ +@start + %fmtdbl =l alloc4 4 + %fmtint =l alloc4 4 + %emptys =l alloc4 4 + storew 2122789, %fmtint + storew 2123557, %fmtdbl + storew 0, %emptys + %vp =l alloc8 32 + %fmt1 =l add 1, %fmt + vastart %vp +@loop + %p =l phi @start %fmt1, @casef %p1, @cased %p1 + %c =w loadsb %p + %p1 =l add 3, %p + jnz %c, @loop1, @end +@loop1 + %isg =w ceqw %c, 103 + jnz %isg, @casef, @cased +@casef + %dbl =d vaarg %vp + %r =w call $printf(l %fmtdbl, ..., d %dbl) + jmp @loop +@cased + %int =w vaarg %vp + %r =w call $printf(l %fmtint, ..., w %int) + jmp @loop +@end + %r =w call $puts(l %emptys) + ret +} + +export function $qbecall1(w %argw0, l %fmt, ...) { +@start + %vp =l alloc8 32 + vastart %vp + %r =w call $print(l %fmt, l %vp) + ret +} + +export function $qbeprint2(d %argd0, l %fmt, ...) { +@start + %fmtdbl =l alloc4 4 + %fmtint =l alloc4 4 + %emptys =l alloc4 4 + storew 2122789, %fmtint + storew 2123557, %fmtdbl + storew 0, %emptys + %vp =l alloc8 32 + %fmt1 =l add 1, %fmt + vastart %vp +@loop + %p =l phi @start %fmt1, @casef %p1, @cased %p1 + %c =w loadsb %p + %p1 =l add 3, %p + jnz %c, @loop1, @end +@loop1 + %isg =w ceqw %c, 103 + jnz %isg, @casef, @cased +@casef + %dbl =d vaarg %vp + %r =w call $printf(l %fmtdbl, ..., d %dbl) + jmp @loop +@cased + %int =w vaarg %vp + %r =w call $printf(l %fmtint, ..., w %int) + jmp @loop +@end + %r =w call $puts(l %emptys) + ret +} + +export function $qbecall2(d %argd0, l %fmt, ...) { +@start + %vp =l alloc8 32 + vastart %vp + %r =w call $print(l %fmt, l %vp) + ret +} + +export function $qbeprint3(w %argw0, w %argw1, w %argw2, w %argw3, l %fmt, ...) 
{ +@start + %fmtdbl =l alloc4 4 + %fmtint =l alloc4 4 + %emptys =l alloc4 4 + storew 2122789, %fmtint + storew 2123557, %fmtdbl + storew 0, %emptys + %vp =l alloc8 32 + %fmt1 =l add 1, %fmt + vastart %vp +@loop + %p =l phi @start %fmt1, @casef %p1, @cased %p1 + %c =w loadsb %p + %p1 =l add 3, %p + jnz %c, @loop1, @end +@loop1 + %isg =w ceqw %c, 103 + jnz %isg, @casef, @cased +@casef + %dbl =d vaarg %vp + %r =w call $printf(l %fmtdbl, ..., d %dbl) + jmp @loop +@cased + %int =w vaarg %vp + %r =w call $printf(l %fmtint, ..., w %int) + jmp @loop +@end + %r =w call $puts(l %emptys) + ret +} + +export function $qbecall3(w %argw0, w %argw1, w %argw2, w %argw3, l %fmt, ...) { +@start + %vp =l alloc8 32 + vastart %vp + %r =w call $print(l %fmt, l %vp) + ret +} + +export function $qbeprint4(d %argd0, d %argd1, d %argd2, d %argd3, d %argd4, d %argd5, l %fmt, ...) { +@start + %fmtdbl =l alloc4 4 + %fmtint =l alloc4 4 + %emptys =l alloc4 4 + storew 2122789, %fmtint + storew 2123557, %fmtdbl + storew 0, %emptys + %vp =l alloc8 32 + %fmt1 =l add 1, %fmt + vastart %vp +@loop + %p =l phi @start %fmt1, @casef %p1, @cased %p1 + %c =w loadsb %p + %p1 =l add 3, %p + jnz %c, @loop1, @end +@loop1 + %isg =w ceqw %c, 103 + jnz %isg, @casef, @cased +@casef + %dbl =d vaarg %vp + %r =w call $printf(l %fmtdbl, ..., d %dbl) + jmp @loop +@cased + %int =w vaarg %vp + %r =w call $printf(l %fmtint, ..., w %int) + jmp @loop +@end + %r =w call $puts(l %emptys) + ret +} + +export function $qbecall4(d %argd0, d %argd1, d %argd2, d %argd3, d %argd4, d %argd5, l %fmt, ...) { +@start + %vp =l alloc8 32 + vastart %vp + %r =w call $print(l %fmt, l %vp) + ret +} + +export function $qbeprint5(w %argw0, w %argw1, w %argw2, w %argw3, w %argw4, d %argd0, d %argd1, d %argd2, d %argd3, d %argd4, d %argd5, d %argd6, l %fmt, ...) 
{ +@start + %fmtdbl =l alloc4 4 + %fmtint =l alloc4 4 + %emptys =l alloc4 4 + storew 2122789, %fmtint + storew 2123557, %fmtdbl + storew 0, %emptys + %vp =l alloc8 32 + %fmt1 =l add 1, %fmt + vastart %vp +@loop + %p =l phi @start %fmt1, @casef %p1, @cased %p1 + %c =w loadsb %p + %p1 =l add 3, %p + jnz %c, @loop1, @end +@loop1 + %isg =w ceqw %c, 103 + jnz %isg, @casef, @cased +@casef + %dbl =d vaarg %vp + %r =w call $printf(l %fmtdbl, ..., d %dbl) + jmp @loop +@cased + %int =w vaarg %vp + %r =w call $printf(l %fmtint, ..., w %int) + jmp @loop +@end + %r =w call $puts(l %emptys) + ret +} + +export function $qbecall5(w %argw0, w %argw1, w %argw2, w %argw3, w %argw4, d %argd0, d %argd1, d %argd2, d %argd3, d %argd4, d %argd5, d %argd6, l %fmt, ...) { +@start + %vp =l alloc8 32 + vastart %vp + %r =w call $print(l %fmt, l %vp) + ret +} + +export function $qbeprint6(w %argw0, w %argw1, w %argw2, w %argw3, w %argw4, w %argw5, w %argw6, w %argw7, w %argw8, w %argw9, d %argd0, d %argd1, d %argd2, d %argd3, d %argd4, d %argd5, d %argd6, d %argd7, d %argd8, d %argd9, l %fmt, ...) { +@start + %fmtdbl =l alloc4 4 + %fmtint =l alloc4 4 + %emptys =l alloc4 4 + storew 2122789, %fmtint + storew 2123557, %fmtdbl + storew 0, %emptys + %vp =l alloc8 32 + %fmt1 =l add 1, %fmt + vastart %vp +@loop + %p =l phi @start %fmt1, @casef %p1, @cased %p1 + %c =w loadsb %p + %p1 =l add 3, %p + jnz %c, @loop1, @end +@loop1 + %isg =w ceqw %c, 103 + jnz %isg, @casef, @cased +@casef + %dbl =d vaarg %vp + %r =w call $printf(l %fmtdbl, ..., d %dbl) + jmp @loop +@cased + %int =w vaarg %vp + %r =w call $printf(l %fmtint, ..., w %int) + jmp @loop +@end + %r =w call $puts(l %emptys) + ret +} + +export function $qbecall6(w %argw0, w %argw1, w %argw2, w %argw3, w %argw4, w %argw5, w %argw6, w %argw7, w %argw8, w %argw9, d %argd0, d %argd1, d %argd2, d %argd3, d %argd4, d %argd5, d %argd6, d %argd7, d %argd8, d %argd9, l %fmt, ...) 
{ +@start + %vp =l alloc8 32 + vastart %vp + %r =w call $print(l %fmt, l %vp) + ret +} + +export function $qbeprint7(w %argw0, w %argw1, w %argw2, w %argw3, w %argw4, w %argw5, w %argw6, w %argw7, w %argw8, l %fmt, ...) { +@start + %fmtdbl =l alloc4 4 + %fmtint =l alloc4 4 + %emptys =l alloc4 4 + storew 2122789, %fmtint + storew 2123557, %fmtdbl + storew 0, %emptys + %vp =l alloc8 32 + %fmt1 =l add 1, %fmt + vastart %vp +@loop + %p =l phi @start %fmt1, @casef %p1, @cased %p1 + %c =w loadsb %p + %p1 =l add 3, %p + jnz %c, @loop1, @end +@loop1 + %isg =w ceqw %c, 103 + jnz %isg, @casef, @cased +@casef + %dbl =d vaarg %vp + %r =w call $printf(l %fmtdbl, ..., d %dbl) + jmp @loop +@cased + %int =w vaarg %vp + %r =w call $printf(l %fmtint, ..., w %int) + jmp @loop +@end + %r =w call $puts(l %emptys) + ret +} + +export function $qbecall7(w %argw0, w %argw1, w %argw2, w %argw3, w %argw4, w %argw5, w %argw6, w %argw7, w %argw8, l %fmt, ...) { +@start + %vp =l alloc8 32 + vastart %vp + %r =w call $print(l %fmt, l %vp) + ret +} + +# >>> driver +# #include +# #include +# extern void qbeprint0(char *, ...); +# extern void qbecall0(char *, ...); +# extern void qbeprint1(int argw0, char *, ...); +# extern void qbecall1(int argw0, char *, ...); +# extern void qbeprint2(double argd0, char *, ...); +# extern void qbecall2(double argd0, char *, ...); +# extern void qbeprint3(int argw0, int argw1, int argw2, int argw3, char *, ...); +# extern void qbecall3(int argw0, int argw1, int argw2, int argw3, char *, ...); +# extern void qbeprint4(double argd0, double argd1, double argd2, double argd3, double argd4, double argd5, char *, ...); +# extern void qbecall4(double argd0, double argd1, double argd2, double argd3, double argd4, double argd5, char *, ...); +# extern void qbeprint5(int argw0, int argw1, int argw2, int argw3, int argw4, double argd0, double argd1, double argd2, double argd3, double argd4, double argd5, double argd6, char *, ...); +# extern void qbecall5(int argw0, int 
argw1, int argw2, int argw3, int argw4, double argd0, double argd1, double argd2, double argd3, double argd4, double argd5, double argd6, char *, ...); +# extern void qbeprint6(int argw0, int argw1, int argw2, int argw3, int argw4, int argw5, int argw6, int argw7, int argw8, int argw9, double argd0, double argd1, double argd2, double argd3, double argd4, double argd5, double argd6, double argd7, double argd8, double argd9, char *, ...); +# extern void qbecall6(int argw0, int argw1, int argw2, int argw3, int argw4, int argw5, int argw6, int argw7, int argw8, int argw9, double argd0, double argd1, double argd2, double argd3, double argd4, double argd5, double argd6, double argd7, double argd8, double argd9, char *, ...); +# extern void qbeprint7(int argw0, int argw1, int argw2, int argw3, int argw4, int argw5, int argw6, int argw7, int argw8, char *, ...); +# extern void qbecall7(int argw0, int argw1, int argw2, int argw3, int argw4, int argw5, int argw6, int argw7, int argw8, char *, ...); +# int print(char *fmt, va_list *ap) { +# return vprintf(fmt, *ap); +# } +# int main() { +# puts("# (0 int, 0 double)"); +# qbeprint0("%d \n", 3); +# qbecall0("%d \n", 3); +# qbeprint0("%g \n", -9.5); +# qbecall0("%g \n", -9.5); +# qbeprint0("%d %g \n", -5, -5.536); +# qbecall0("%d %g \n", -5, -5.536); +# qbeprint0("%g %g \n", 4.729, 3.534); +# qbecall0("%g %g \n", 4.729, 3.534); +# qbeprint0("%d %d %d %d \n", 8, -9, -2, -10); +# qbecall0("%d %d %d %d \n", 8, -9, -2, -10); +# qbeprint0("%g %g %g %g \n", -5.627, 0.1071, -9.469, -6.023); +# qbecall0("%g %g %g %g \n", -5.627, 0.1071, -9.469, -6.023); +# qbeprint0("%d %g %d %g \n", 3, 0.8988, -6, 1.785); +# qbecall0("%d %g %d %g \n", 3, 0.8988, -6, 1.785); +# qbeprint0("%g %g %d %d \n", 6.189, -9.87, 6, 4); +# qbecall0("%g %g %d %d \n", 6.189, -9.87, 6, 4); +# qbeprint0("%d %d %g %g \n", -3, -7, 9.144, -3.268); +# qbecall0("%d %d %g %g \n", -3, -7, 9.144, -3.268); +# qbeprint0("\n"); +# qbecall0("\n"); +# puts("# (1 int, 0 double)"); 
+# qbeprint1(0, "%d \n", -9); +# qbecall1(0, "%d \n", -9); +# qbeprint1(0, "%g \n", -8.066); +# qbecall1(0, "%g \n", -8.066); +# qbeprint1(0, "%d %g \n", 7, 2.075); +# qbecall1(0, "%d %g \n", 7, 2.075); +# qbeprint1(0, "%g %g \n", 6.143, 4.595); +# qbecall1(0, "%g %g \n", 6.143, 4.595); +# qbeprint1(0, "%d %d %d %d \n", 1, 10, -3, 1); +# qbecall1(0, "%d %d %d %d \n", 1, 10, -3, 1); +# qbeprint1(0, "%g %g %g %g \n", 6.588, 2.37, 7.234, 1.547); +# qbecall1(0, "%g %g %g %g \n", 6.588, 2.37, 7.234, 1.547); +# qbeprint1(0, "%d %g %d %g \n", 4, -9.084, -6, -4.212); +# qbecall1(0, "%d %g %d %g \n", 4, -9.084, -6, -4.212); +# qbeprint1(0, "%g %g %d %d \n", -8.404, -5.344, -8, -5); +# qbecall1(0, "%g %g %d %d \n", -8.404, -5.344, -8, -5); +# qbeprint1(0, "%d %d %g %g \n", 3, -3, -2.596, -5.81); +# qbecall1(0, "%d %d %g %g \n", 3, -3, -2.596, -5.81); +# qbeprint1(0, "\n"); +# qbecall1(0, "\n"); +# puts("# (0 int, 1 double)"); +# qbeprint2(0, "%d \n", -5); +# qbecall2(0, "%d \n", -5); +# qbeprint2(0, "%g \n", 8.733); +# qbecall2(0, "%g \n", 8.733); +# qbeprint2(0, "%d %g \n", 3, 2.183); +# qbecall2(0, "%d %g \n", 3, 2.183); +# qbeprint2(0, "%g %g \n", -6.577, 4.583); +# qbecall2(0, "%g %g \n", -6.577, 4.583); +# qbeprint2(0, "%d %d %d %d \n", -7, -3, 10, 3); +# qbecall2(0, "%d %d %d %d \n", -7, -3, 10, 3); +# qbeprint2(0, "%g %g %g %g \n", 1.139, 3.692, 6.857, 5.52); +# qbecall2(0, "%g %g %g %g \n", 1.139, 3.692, 6.857, 5.52); +# qbeprint2(0, "%d %g %d %g \n", -6, -9.358, -4, -4.645); +# qbecall2(0, "%d %g %d %g \n", -6, -9.358, -4, -4.645); +# qbeprint2(0, "%g %g %d %d \n", -5.78, 8.858, 8, -4); +# qbecall2(0, "%g %g %d %d \n", -5.78, 8.858, 8, -4); +# qbeprint2(0, "%d %d %g %g \n", 3, -2, 8.291, -0.823); +# qbecall2(0, "%d %d %g %g \n", 3, -2, 8.291, -0.823); +# qbeprint2(0, "\n"); +# qbecall2(0, "\n"); +# puts("# (4 int, 0 double)"); +# qbeprint3(0, 0, 0, 0, "%d \n", -5); +# qbecall3(0, 0, 0, 0, "%d \n", -5); +# qbeprint3(0, 0, 0, 0, "%g \n", -5.067); +# qbecall3(0, 0, 0, 
0, "%g \n", -5.067); +# qbeprint3(0, 0, 0, 0, "%d %g \n", 1, -4.745); +# qbecall3(0, 0, 0, 0, "%d %g \n", 1, -4.745); +# qbeprint3(0, 0, 0, 0, "%g %g \n", 1.692, 7.956); +# qbecall3(0, 0, 0, 0, "%g %g \n", 1.692, 7.956); +# qbeprint3(0, 0, 0, 0, "%d %d %d %d \n", -2, -6, 10, 0); +# qbecall3(0, 0, 0, 0, "%d %d %d %d \n", -2, -6, 10, 0); +# qbeprint3(0, 0, 0, 0, "%g %g %g %g \n", -8.182, -9.058, -7.807, 2.549); +# qbecall3(0, 0, 0, 0, "%g %g %g %g \n", -8.182, -9.058, -7.807, 2.549); +# qbeprint3(0, 0, 0, 0, "%d %g %d %g \n", 6, -1.557, -9, -2.368); +# qbecall3(0, 0, 0, 0, "%d %g %d %g \n", 6, -1.557, -9, -2.368); +# qbeprint3(0, 0, 0, 0, "%g %g %d %d \n", 9.922, 0.5823, 10, 8); +# qbecall3(0, 0, 0, 0, "%g %g %d %d \n", 9.922, 0.5823, 10, 8); +# qbeprint3(0, 0, 0, 0, "%d %d %g %g \n", -10, 5, 3.634, 0.7394); +# qbecall3(0, 0, 0, 0, "%d %d %g %g \n", -10, 5, 3.634, 0.7394); +# qbeprint3(0, 0, 0, 0, "\n"); +# qbecall3(0, 0, 0, 0, "\n"); +# puts("# (0 int, 6 double)"); +# qbeprint4(0, 0, 0, 0, 0, 0, "%d \n", -5); +# qbecall4(0, 0, 0, 0, 0, 0, "%d \n", -5); +# qbeprint4(0, 0, 0, 0, 0, 0, "%g \n", 2.819); +# qbecall4(0, 0, 0, 0, 0, 0, "%g \n", 2.819); +# qbeprint4(0, 0, 0, 0, 0, 0, "%d %g \n", -8, -1.305); +# qbecall4(0, 0, 0, 0, 0, 0, "%d %g \n", -8, -1.305); +# qbeprint4(0, 0, 0, 0, 0, 0, "%g %g \n", -0.9255, 9.076); +# qbecall4(0, 0, 0, 0, 0, 0, "%g %g \n", -0.9255, 9.076); +# qbeprint4(0, 0, 0, 0, 0, 0, "%d %d %d %d \n", 8, -5, 0, -7); +# qbecall4(0, 0, 0, 0, 0, 0, "%d %d %d %d \n", 8, -5, 0, -7); +# qbeprint4(0, 0, 0, 0, 0, 0, "%g %g %g %g \n", 8.253, 7.41, -4.031, 2.779); +# qbecall4(0, 0, 0, 0, 0, 0, "%g %g %g %g \n", 8.253, 7.41, -4.031, 2.779); +# qbeprint4(0, 0, 0, 0, 0, 0, "%d %g %d %g \n", 2, -6.943, 6, 0.7876); +# qbecall4(0, 0, 0, 0, 0, 0, "%d %g %d %g \n", 2, -6.943, 6, 0.7876); +# qbeprint4(0, 0, 0, 0, 0, 0, "%g %g %d %d \n", 5.573, 0.6071, -10, -4); +# qbecall4(0, 0, 0, 0, 0, 0, "%g %g %d %d \n", 5.573, 0.6071, -10, -4); +# qbeprint4(0, 0, 0, 0, 0, 0, "%d 
%d %g %g \n", -10, 9, 7.574, 6.633); +# qbecall4(0, 0, 0, 0, 0, 0, "%d %d %g %g \n", -10, 9, 7.574, 6.633); +# qbeprint4(0, 0, 0, 0, 0, 0, "\n"); +# qbecall4(0, 0, 0, 0, 0, 0, "\n"); +# puts("# (5 int, 7 double)"); +# qbeprint5(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, "%d \n", -4); +# qbecall5(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, "%d \n", -4); +# qbeprint5(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, "%g \n", -8.841); +# qbecall5(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, "%g \n", -8.841); +# qbeprint5(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, "%d %g \n", 8, 8.939); +# qbecall5(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, "%d %g \n", 8, 8.939); +# qbeprint5(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, "%g %g \n", -8.287, -0.2802); +# qbecall5(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, "%g %g \n", -8.287, -0.2802); +# qbeprint5(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, "%d %d %d %d \n", -9, 5, 6, -8); +# qbecall5(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, "%d %d %d %d \n", -9, 5, 6, -8); +# qbeprint5(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, "%g %g %g %g \n", -0.4944, 0.9961, -4.699, 7.449); +# qbecall5(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, "%g %g %g %g \n", -0.4944, 0.9961, -4.699, 7.449); +# qbeprint5(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, "%d %g %d %g \n", -2, -5.764, 1, 4.599); +# qbecall5(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, "%d %g %d %g \n", -2, -5.764, 1, 4.599); +# qbeprint5(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, "%g %g %d %d \n", -5.977, -3.766, 10, 3); +# qbecall5(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, "%g %g %d %d \n", -5.977, -3.766, 10, 3); +# qbeprint5(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, "%d %d %g %g \n", -1, 0, -7.58, -5.506); +# qbecall5(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, "%d %d %g %g \n", -1, 0, -7.58, -5.506); +# qbeprint5(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, "\n"); +# qbecall5(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, "\n"); +# puts("# (10 int, 10 double)"); +# qbeprint6(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, "%d \n", -3); +# qbecall6(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, "%d \n", -3); +# 
qbeprint6(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, "%g \n", 1.766); +# qbecall6(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, "%g \n", 1.766); +# qbeprint6(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, "%d %g \n", -6, -5.596); +# qbecall6(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, "%d %g \n", -6, -5.596); +# qbeprint6(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, "%g %g \n", -8.58, 2.622); +# qbecall6(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, "%g %g \n", -8.58, 2.622); +# qbeprint6(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, "%d %d %d %d \n", -6, 9, 8, -9); +# qbecall6(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, "%d %d %d %d \n", -6, 9, 8, -9); +# qbeprint6(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, "%g %g %g %g \n", -5.24, 3.38, -5.715, -7.354); +# qbecall6(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, "%g %g %g %g \n", -5.24, 3.38, -5.715, -7.354); +# qbeprint6(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, "%d %g %d %g \n", 9, 1.421, -1, 5.692); +# qbecall6(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, "%d %g %d %g \n", 9, 1.421, -1, 5.692); +# qbeprint6(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, "%g %g %d %d \n", 6.15, -6.192, -8, -1); +# qbecall6(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, "%g %g %d %d \n", 6.15, -6.192, -8, -1); +# qbeprint6(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, "%d %d %g %g \n", -2, -1, 4.582, 3.467); +# qbecall6(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, "%d %d %g %g \n", -2, -1, 4.582, 3.467); +# qbeprint6(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, "\n"); +# qbecall6(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, "\n"); +# puts("# (9 int, 0 double)"); +# qbeprint7(0, 0, 0, 0, 0, 0, 0, 0, 0, "%d \n", 10); +# qbecall7(0, 0, 0, 0, 0, 0, 0, 0, 0, "%d \n", 10); 
+# qbeprint7(0, 0, 0, 0, 0, 0, 0, 0, 0, "%g \n", -8.032); +# qbecall7(0, 0, 0, 0, 0, 0, 0, 0, 0, "%g \n", -8.032); +# qbeprint7(0, 0, 0, 0, 0, 0, 0, 0, 0, "%d %g \n", -2, -3.214); +# qbecall7(0, 0, 0, 0, 0, 0, 0, 0, 0, "%d %g \n", -2, -3.214); +# qbeprint7(0, 0, 0, 0, 0, 0, 0, 0, 0, "%g %g \n", 7.233, -5.027); +# qbecall7(0, 0, 0, 0, 0, 0, 0, 0, 0, "%g %g \n", 7.233, -5.027); +# qbeprint7(0, 0, 0, 0, 0, 0, 0, 0, 0, "%d %d %d %d \n", -7, -1, -2, -5); +# qbecall7(0, 0, 0, 0, 0, 0, 0, 0, 0, "%d %d %d %d \n", -7, -1, -2, -5); +# qbeprint7(0, 0, 0, 0, 0, 0, 0, 0, 0, "%g %g %g %g \n", -5.004, 8.465, -1.137, 7.227); +# qbecall7(0, 0, 0, 0, 0, 0, 0, 0, 0, "%g %g %g %g \n", -5.004, 8.465, -1.137, 7.227); +# qbeprint7(0, 0, 0, 0, 0, 0, 0, 0, 0, "%d %g %d %g \n", 1, -8.988, 10, 6.721); +# qbecall7(0, 0, 0, 0, 0, 0, 0, 0, 0, "%d %g %d %g \n", 1, -8.988, 10, 6.721); +# qbeprint7(0, 0, 0, 0, 0, 0, 0, 0, 0, "%g %g %d %d \n", 9.38, 8.527, 7, -7); +# qbecall7(0, 0, 0, 0, 0, 0, 0, 0, 0, "%g %g %d %d \n", 9.38, 8.527, 7, -7); +# qbeprint7(0, 0, 0, 0, 0, 0, 0, 0, 0, "%d %d %g %g \n", 0, -6, -1.979, -8.827); +# qbecall7(0, 0, 0, 0, 0, 0, 0, 0, 0, "%d %d %g %g \n", 0, -6, -1.979, -8.827); +# qbeprint7(0, 0, 0, 0, 0, 0, 0, 0, 0, "\n"); +# qbecall7(0, 0, 0, 0, 0, 0, 0, 0, 0, "\n"); +# } +# <<< + +# >>> output +# # (0 int, 0 double) +# 3 +# 3 +# -9.5 +# -9.5 +# -5 -5.536 +# -5 -5.536 +# 4.729 3.534 +# 4.729 3.534 +# 8 -9 -2 -10 +# 8 -9 -2 -10 +# -5.627 0.1071 -9.469 -6.023 +# -5.627 0.1071 -9.469 -6.023 +# 3 0.8988 -6 1.785 +# 3 0.8988 -6 1.785 +# 6.189 -9.87 6 4 +# 6.189 -9.87 6 4 +# -3 -7 9.144 -3.268 +# -3 -7 9.144 -3.268 +# +# +# # (1 int, 0 double) +# -9 +# -9 +# -8.066 +# -8.066 +# 7 2.075 +# 7 2.075 +# 6.143 4.595 +# 6.143 4.595 +# 1 10 -3 1 +# 1 10 -3 1 +# 6.588 2.37 7.234 1.547 +# 6.588 2.37 7.234 1.547 +# 4 -9.084 -6 -4.212 +# 4 -9.084 -6 -4.212 +# -8.404 -5.344 -8 -5 +# -8.404 -5.344 -8 -5 +# 3 -3 -2.596 -5.81 +# 3 -3 -2.596 -5.81 +# +# +# # (0 int, 1 double) +# -5 +# -5 +# 
8.733 +# 8.733 +# 3 2.183 +# 3 2.183 +# -6.577 4.583 +# -6.577 4.583 +# -7 -3 10 3 +# -7 -3 10 3 +# 1.139 3.692 6.857 5.52 +# 1.139 3.692 6.857 5.52 +# -6 -9.358 -4 -4.645 +# -6 -9.358 -4 -4.645 +# -5.78 8.858 8 -4 +# -5.78 8.858 8 -4 +# 3 -2 8.291 -0.823 +# 3 -2 8.291 -0.823 +# +# +# # (4 int, 0 double) +# -5 +# -5 +# -5.067 +# -5.067 +# 1 -4.745 +# 1 -4.745 +# 1.692 7.956 +# 1.692 7.956 +# -2 -6 10 0 +# -2 -6 10 0 +# -8.182 -9.058 -7.807 2.549 +# -8.182 -9.058 -7.807 2.549 +# 6 -1.557 -9 -2.368 +# 6 -1.557 -9 -2.368 +# 9.922 0.5823 10 8 +# 9.922 0.5823 10 8 +# -10 5 3.634 0.7394 +# -10 5 3.634 0.7394 +# +# +# # (0 int, 6 double) +# -5 +# -5 +# 2.819 +# 2.819 +# -8 -1.305 +# -8 -1.305 +# -0.9255 9.076 +# -0.9255 9.076 +# 8 -5 0 -7 +# 8 -5 0 -7 +# 8.253 7.41 -4.031 2.779 +# 8.253 7.41 -4.031 2.779 +# 2 -6.943 6 0.7876 +# 2 -6.943 6 0.7876 +# 5.573 0.6071 -10 -4 +# 5.573 0.6071 -10 -4 +# -10 9 7.574 6.633 +# -10 9 7.574 6.633 +# +# +# # (5 int, 7 double) +# -4 +# -4 +# -8.841 +# -8.841 +# 8 8.939 +# 8 8.939 +# -8.287 -0.2802 +# -8.287 -0.2802 +# -9 5 6 -8 +# -9 5 6 -8 +# -0.4944 0.9961 -4.699 7.449 +# -0.4944 0.9961 -4.699 7.449 +# -2 -5.764 1 4.599 +# -2 -5.764 1 4.599 +# -5.977 -3.766 10 3 +# -5.977 -3.766 10 3 +# -1 0 -7.58 -5.506 +# -1 0 -7.58 -5.506 +# +# +# # (10 int, 10 double) +# -3 +# -3 +# 1.766 +# 1.766 +# -6 -5.596 +# -6 -5.596 +# -8.58 2.622 +# -8.58 2.622 +# -6 9 8 -9 +# -6 9 8 -9 +# -5.24 3.38 -5.715 -7.354 +# -5.24 3.38 -5.715 -7.354 +# 9 1.421 -1 5.692 +# 9 1.421 -1 5.692 +# 6.15 -6.192 -8 -1 +# 6.15 -6.192 -8 -1 +# -2 -1 4.582 3.467 +# -2 -1 4.582 3.467 +# +# +# # (9 int, 0 double) +# 10 +# 10 +# -8.032 +# -8.032 +# -2 -3.214 +# -2 -3.214 +# 7.233 -5.027 +# 7.233 -5.027 +# -7 -1 -2 -5 +# -7 -1 -2 -5 +# -5.004 8.465 -1.137 7.227 +# -5.004 8.465 -1.137 7.227 +# 1 -8.988 10 6.721 +# 1 -8.988 10 6.721 +# 9.38 8.527 7 -7 +# 9.38 8.527 7 -7 +# 0 -6 -1.979 -8.827 +# 0 -6 -1.979 -8.827 +# +# +# <<< diff --git a/src/qbe/tools/abi8.py b/src/qbe/tools/abi8.py 
new file mode 100755 index 00000000..4616ca5a --- /dev/null +++ b/src/qbe/tools/abi8.py @@ -0,0 +1,110 @@ +#!/usr/bin/python3 + +# support script to create +# the abi8.ssa test + +def ctype(arg): + if arg[0] == 'p': return ctype(arg[1:]) + if arg[0] == ':': return 'S' + arg[1:] + return {'w':'int', 'l':'long', + 's':'float', 'd':'double'}[arg] + +def cparam(iarg): + return ctype(iarg[1]) + ' p' + str(iarg[0]) + +def gencfn(id, args): + out = '# extern void qfn' + id + '(' + out += ', '.join(map(ctype, args)) + ');\n' + out += '# void cfn' + id + '(' + out += ', '.join(map(cparam, enumerate(args))) + out += ') {\n' + out += '# \tprintf("qbe->c(%d)", ' + id + ');\n' + out += '# \t' + for (i, arg) in enumerate(args): + if arg[0] != 'p': continue + ty = arg[1:] + if ty[0] == ':': + out += 'p' + ty[1:] + '(&' + else: + out += 'p' + ty + '(' + out += 'p' + str(i) + '); ' + out += 'puts("");\n' + out += '# \tqfn' + id + '(' + out += ', '.join('p'+str(i) for i in range(len(args))) + out += ');\n' + out += '# }\n' + return out + +def qparam(iarg): + ty = iarg[1][1:] if iarg[1][0] == 'p' else iarg[1] + return ty + ' %p' + str(iarg[0]) + +def genqfn(id, args): + out = 'export\nfunction $qfn' + id + '(' + out += ', '.join(map(qparam, enumerate(args))) + out += ') {\n' + out += '@start\n' + out += '\t%r0 =w call $printf(l $ctoqbestr, w ' + id + ')\n' + for (i, arg) in enumerate(args): + if arg[0] != 'p': continue + ty = arg[1:] + if ty[0] == ':': + out += '\tcall $p' + ty[1:] + out += '(l %p' + str(i) + ')\n' + else: + out += '\tcall $p' + ty + out += '(' + ty + ' %p' + str(i) + ')\n' + out += '\t%r1 =w call $puts(l $emptystr)\n' + out += '\tret\n' + out += '}\n' + return out + +def carg(iarg): + i, arg = iarg + print = arg[0] == 'p' + ty = arg if not print else arg[1:] + if ty[0] == ':': + if print: + return ty + ' $' + ty[1:] + else: + return ty + ' $z' + ty[1:] + if not print: + return ty + ' 0' + if ty == 'w' or ty == 'l': + return ty + ' ' + str(i+1) + if ty == 's' or ty 
== 'd': + flt = str(i+1) + '.' + str(i+1) + return ty + ' ' + ty + '_' + flt + +def genmaincall(id, args): + out = '\tcall $cfn' + id + '(' + out += ', '.join(map(carg, enumerate(args))) + out += ')\n' + return out + +def gen(tvec): + for i, t in enumerate(tvec): + print(genqfn(str(i), t), end='') + print('') + for i, t in enumerate(tvec): + print(genmaincall(str(i), t), end='') + print('') + for i, t in enumerate(tvec): + print(gencfn(str(i), t), end='') + +TVEC = [ + ['s']*8 + ['ps'], + ['pw', 'ps', 'p:fi1'], + ['pw', 'p:fi2', 'ps'], + ['pw', 'ps', 'p:fi3'], + ['p:ss'], + ['d']*7 + ['p:ss', 'ps', 'pl'], + ['p:lb'], + ['w']*7 + ['p:lb'], + ['w']*8 + ['p:lb'], + [ 'p:big' ], + ['w']*8 + ['p:big', 'ps', 'pl'], +] + +if __name__ == '__main__': + gen(TVEC) diff --git a/src/qbe/tools/abifuzz.sh b/src/qbe/tools/abifuzz.sh new file mode 100755 index 00000000..add56ebb --- /dev/null +++ b/src/qbe/tools/abifuzz.sh @@ -0,0 +1,107 @@ +#!/bin/sh + +OCAMLC=${OCAMLC:-/usr/bin/ocamlc} +DIR=`cd $(dirname "$0"); pwd` +QBE=$DIR/../qbe + +failure() { + echo "Failure at stage:" $1 >&2 + exit 1 +} + +cleanup() { + rm -fr $TMP +} + +init() { + cp $DIR/callgen.ml $TMP + pushd $TMP > /dev/null + + cat > Makefile << EOM + +.PHONY: test +test: caller.o callee.o + c99 -o \$@ caller.o callee.o +%.o: %.c + c99 -c -o \$@ \$< +%.o: %.ssa + $QBE -o \$*.s \$< + c99 -c -o \$@ \$*.s + +EOM + + if ! 
$OCAMLC callgen.ml -o callgen + then + popd > /dev/null + cleanup + failure "abifuzz compilation" + fi + popd > /dev/null +} + +once() { + if test -z "$3" + then + $TMP/callgen $TMP $1 $2 + else + $TMP/callgen -s $3 $TMP $1 $2 + fi + make -C $TMP test > /dev/null || failure "building" + $TMP/test || failure "runtime" +} + +usage() { + echo "usage: abitest.sh [-callssa] [-callc] [-s SEED] [-n ITERATIONS]" >&2 + exit 1 +} + +N=1 +CALLER=c +CALLEE=ssa + +while test -n "$1" +do + case "$1" in + "-callssa") + CALLER=c + CALLEE=ssa + ;; + "-callc") + CALLER=ssa + CALLEE=c + ;; + "-s") + test -n "$2" || usage + shift + SEED="$1" + ;; + "-n") + test -n "$2" || usage + shift + N="$1" + ;; + *) + usage + ;; + esac + shift +done + +TMP=`mktemp -d abifuzz.XXXXXX` + +init + +if test -n "$S" +then + once $CALLER $CALLEE $SEED +else + for n in `seq $N` + do + once $CALLER $CALLEE + echo "$n" | grep "00$" + done +fi + +echo "All done." + +cleanup diff --git a/src/qbe/tools/callgen.ml b/src/qbe/tools/callgen.ml new file mode 100644 index 00000000..d53eabb9 --- /dev/null +++ b/src/qbe/tools/callgen.ml @@ -0,0 +1,535 @@ +(* abi fuzzer, generates two modules one calling + * the other in two possibly different languages + *) + +type _ bty = + | Char: int bty + | Short: int bty + | Int: int bty + | Long: int bty + | Float: float bty + | Double: float bty + +type _ sty = + | Field: 'a bty * 'b sty -> ('a * 'b) sty + | Empty: unit sty + +type _ aty = + | Base: 'a bty -> 'a aty + | Struct: 'a sty -> 'a aty + +type anyb = AB: _ bty -> anyb (* kinda boring... *) +type anys = AS: _ sty -> anys +type anya = AA: _ aty -> anya +type testb = TB: 'a bty * 'a -> testb +type testa = TA: 'a aty * 'a -> testa + + +let align a x = + let m = x mod a in + if m <> 0 then x + (a-m) else x + +let btysize: type a. a bty -> int = function + | Char -> 1 + | Short -> 2 + | Int -> 4 + | Long -> 8 + | Float -> 4 + | Double -> 8 + +let btyalign = btysize + +let styempty: type a. 
a sty -> bool = function + | Field _ -> false + | Empty -> true + +let stysize s = + let rec f: type a. int -> a sty -> int = + fun sz -> function + | Field (b, s) -> + let a = btyalign b in + f (align a sz + btysize b) s + | Empty -> sz in + f 0 s + +let rec styalign: type a. a sty -> int = function + | Field (b, s) -> max (btyalign b) (styalign s) + | Empty -> 1 + + +(* Generate types and test vectors. *) +module Gen = struct + module R = Random + + let init = function + | None -> + let f = open_in "/dev/urandom" in + let seed = + Char.code (input_char f) lsl 16 + + Char.code (input_char f) lsl 8 + + Char.code (input_char f) in + close_in f; + R.init seed; + seed + | Some seed -> + R.init seed; + seed + + let int sz = + let bound = 1 lsl (8 * min sz 3 - 1) in + let i = R.int bound in + if R.bool () then - i else i + + let float () = + let f = R.float 1000. in + if R.bool () then -. f else f + + let testv: type a. a aty -> a = + let tb: type a. a bty -> a = function (* eh, dry... *) + | Float -> float () + | Double -> float () + | Char -> int (btysize Char) + | Short -> int (btysize Short) + | Int -> int (btysize Int) + | Long -> int (btysize Long) in + let rec ts: type a. 
a sty -> a = function + | Field (b, s) -> (tb b, ts s) + | Empty -> () in + function + | Base b -> tb b + | Struct s -> ts s + + let b () = (* uniform *) + match R.int 6 with + | 0 -> AB Char + | 1 -> AB Short + | 2 -> AB Int + | 3 -> AB Long + | 4 -> AB Float + | _ -> AB Double + + let smax = 5 (* max elements in structs *) + let structp = 0.3 (* odds of having a struct type *) + let amax = 8 (* max function arguments *) + + let s () = + let rec f n = + if n = 0 then AS Empty else + let AB bt = b () in + let AS st = f (n-1) in + AS (Field (bt, st)) in + f (1 + R.int (smax-1)) + + let a () = + if R.float 1.0 > structp then + let AB bt = b () in + AA (Base bt) + else + let AB bt = b () in + let AS st = s () in + AA (Struct (Field (bt, st))) + + let test () = + let AA ty = a () in + let t = testv ty in + TA (ty, t) + + let tests () = + let rec f n = + if n = 0 then [] else + test () :: f (n-1) in + f (R.int amax) + +end + + +(* Code generation for C *) +module OutC = struct + open Printf + + let ctypelong oc name = + let cb: type a. a bty -> unit = function + | Char -> fprintf oc "char" + | Short -> fprintf oc "short" + | Int -> fprintf oc "int" + | Long -> fprintf oc "long" + | Float -> fprintf oc "float" + | Double -> fprintf oc "double" in + let rec cs: type a. int -> a sty -> unit = + fun i -> function + | Field (b, s) -> + cb b; + fprintf oc " f%d; " i; + cs (i+1) s; + | Empty -> () in + function + | Base b -> + cb b; + | Struct s -> + fprintf oc "struct %s { " name; + cs 1 s; + fprintf oc "}"; + () + + let ctype: type a. out_channel -> string -> a aty -> unit = + fun oc name -> function + | Struct _ -> fprintf oc "struct %s" name + | t -> ctypelong oc "" t + + let base: type a. 
out_channel -> a bty * a -> unit = + fun oc -> function + | Char, i -> fprintf oc "%d" i + | Short, i -> fprintf oc "%d" i + | Int, i -> fprintf oc "%d" i + | Long, i -> fprintf oc "%d" i + | Float, f -> fprintf oc "%ff" f + | Double, f -> fprintf oc "%f" f + + let init oc name (TA (ty, t)) = + let inits s = + let rec f: type a. a sty * a -> unit = function + | Field (b, s), (tb, ts) -> + base oc (b, tb); + fprintf oc ", "; + f (s, ts) + | Empty, () -> () in + fprintf oc "{ "; + f s; + fprintf oc "}"; in + ctype oc name ty; + fprintf oc " %s = " name; + begin match (ty, t) with + | Base b, tb -> base oc (b, tb) + | Struct s, ts -> inits (s, ts) + end; + fprintf oc ";\n"; + () + + let extension = ".c" + + let comment oc s = + fprintf oc "/* %s */\n" s + + let prelude oc = List.iter (fprintf oc "%s\n") + [ "#include " + ; "#include " + ; "" + ; "static void fail(char *chk)" + ; "{" + ; "\tfprintf(stderr, \"fail: checking %s\\n\", chk);" + ; "\tabort();" + ; "}" + ; "" + ] + + let typedef oc name = function + | TA (Struct ts, _) -> + ctypelong oc name (Struct ts); + fprintf oc ";\n"; + | _ -> () + + let check oc name = + let chkbase: type a. string -> a bty * a -> unit = + fun name t -> + fprintf oc "\tif (%s != " name; + base oc t; + fprintf oc ")\n\t\tfail(%S);\n" name; in + function + | TA (Base b, tb) -> chkbase name (b, tb) + | TA (Struct s, ts) -> + let rec f: type a. 
int -> a sty * a -> unit = + fun i -> function + | Field (b, s), (tb, ts) -> + chkbase (Printf.sprintf "%s.f%d" name i) (b, tb); + f (i+1) (s, ts); + | Empty, () -> () in + f 1 (s, ts) + + let argname i = "arg" ^ string_of_int (i+1) + + let proto oc (TA (tret, _)) args = + ctype oc "ret" tret; + fprintf oc " f("; + let narg = List.length args in + List.iteri (fun i (TA (targ, _)) -> + ctype oc (argname i) targ; + fprintf oc " %s" (argname i); + if i <> narg-1 then + fprintf oc ", "; + ) args; + fprintf oc ")"; + () + + let caller oc ret args = + let narg = List.length args in + prelude oc; + typedef oc "ret" ret; + List.iteri (fun i arg -> + typedef oc (argname i) arg; + ) args; + proto oc ret args; + fprintf oc ";\n\nint main()\n{\n"; + List.iteri (fun i arg -> + fprintf oc "\t"; + init oc (argname i) arg; + ) args; + fprintf oc "\t"; + let TA (tret, _) = ret in + ctype oc "ret" tret; + fprintf oc " ret;\n\n"; + fprintf oc "\tret = f("; + List.iteri (fun i _ -> + fprintf oc "%s" (argname i); + if i <> narg-1 then + fprintf oc ", "; + ) args; + fprintf oc ");\n"; + check oc "ret" ret; + fprintf oc "\n\treturn 0;\n}\n"; + () + + let callee oc ret args = + prelude oc; + typedef oc "ret" ret; + List.iteri (fun i arg -> + typedef oc (argname i) arg; + ) args; + fprintf oc "\n"; + proto oc ret args; + fprintf oc "\n{\n\t"; + init oc "ret" ret; + fprintf oc "\n"; + List.iteri (fun i arg -> + check oc (argname i) arg; + ) args; + fprintf oc "\n\treturn ret;\n}\n"; + () + +end + +(* Code generation for QBE *) +module OutIL = struct + open Printf + + let comment oc s = + fprintf oc "# %s\n" s + + let tmp, lbl = + let next = ref 0 in + (fun () -> incr next; "%t" ^ (string_of_int !next)), + (fun () -> incr next; "@l" ^ (string_of_int !next)) + + let bvalue: type a. 
a bty * a -> string = function + | Char, i -> sprintf "%d" i + | Short, i -> sprintf "%d" i + | Int, i -> sprintf "%d" i + | Long, i -> sprintf "%d" i + | Float, f -> sprintf "s_%f" f + | Double, f -> sprintf "d_%f" f + + let btype: type a. a bty -> string = function + | Char -> "w" + | Short -> "w" + | Int -> "w" + | Long -> "l" + | Float -> "s" + | Double -> "d" + + let extension = ".ssa" + + let argname i = "arg" ^ string_of_int (i+1) + + let siter oc base s g = + let rec f: type a. int -> int -> a sty * a -> unit = + fun id off -> function + | Field (b, s), (tb, ts) -> + let off = align (btyalign b) off in + let addr = tmp () in + fprintf oc "\t%s =l add %d, %s\n" addr off base; + g id addr (TB (b, tb)); + f (id + 1) (off + btysize b) (s, ts); + | Empty, () -> () in + f 0 0 s + + let bmemtype b = + if AB b = AB Char then "b" else + if AB b = AB Short then "h" else + btype b + + let init oc = function + | TA (Base b, tb) -> bvalue (b, tb) + | TA (Struct s, ts) -> + let base = tmp () in + fprintf oc "\t%s =l alloc%d %d\n" + base (styalign s) (stysize s); + siter oc base (s, ts) + begin fun _ addr (TB (b, tb)) -> + fprintf oc "\tstore%s %s, %s\n" + (bmemtype b) (bvalue (b, tb)) addr; + end; + base + + let check oc id name = + let bcheck = fun id name (b, tb) -> + let tcmp = tmp () in + let nxtl = lbl () in + fprintf oc "\t%s =w ceq%s %s, %s\n" + tcmp (btype b) name (bvalue (b, tb)); + fprintf oc "\tstorew %d, %%failcode\n" id; + fprintf oc "\tjnz %s, %s, @fail\n" tcmp nxtl; + fprintf oc "%s\n" nxtl; in + function + | TA (Base Char, i) -> + let tval = tmp () in + fprintf oc "\t%s =w extsb %s\n" tval name; + bcheck id tval (Int, i) + | TA (Base Short, i) -> + let tval = tmp () in + fprintf oc "\t%s =w extsh %s\n" tval name; + bcheck id tval (Int, i) + | TA (Base b, tb) -> + bcheck id name (b, tb) + | TA (Struct s, ts) -> + siter oc name (s, ts) + begin fun id' addr (TB (b, tb)) -> + let tval = tmp () in + let lsuffix = + if AB b = AB Char then "sb" else + if AB b = 
AB Short then "sh" else + "" in + fprintf oc "\t%s =%s load%s %s\n" + tval (btype b) lsuffix addr; + bcheck (100*id + id'+1) tval (b, tb); + end; + () + + let ttype name = function + | TA (Base b, _) -> btype b + | TA (Struct _, _) -> ":" ^ name + + let typedef oc name = + let rec f: type a. a sty -> unit = function + | Field (b, s) -> + fprintf oc "%s" (bmemtype b); + if not (styempty s) then + fprintf oc ", "; + f s; + | Empty -> () in + function + | TA (Struct ts, _) -> + fprintf oc "type :%s = { " name; + f ts; + fprintf oc " }\n"; + | _ -> () + + let postlude oc = List.iter (fprintf oc "%s\n") + [ "@fail" + ; "# failure code" + ; "\t%fcode =w loadw %failcode" + ; "\t%f0 =w call $printf(l $failstr, w %fcode)" + ; "\t%f1 =w call $abort()" + ; "\tret 0" + ; "}" + ; "" + ; "data $failstr = { b \"fail on check %d\\n\", b 0 }" + ] + + let caller oc ret args = + let narg = List.length args in + List.iteri (fun i arg -> + typedef oc (argname i) arg; + ) args; + typedef oc "ret" ret; + fprintf oc "\nexport function w $main() {\n"; + fprintf oc "@start\n"; + fprintf oc "\t%%failcode =l alloc4 4\n"; + let targs = List.mapi (fun i arg -> + comment oc ("define argument " ^ (string_of_int (i+1))); + (ttype (argname i) arg, init oc arg) + ) args in + comment oc "call test function"; + fprintf oc "\t%%ret =%s call $f(" (ttype "ret" ret); + List.iteri (fun i (ty, tmp) -> + fprintf oc "%s %s" ty tmp; + if i <> narg-1 then + fprintf oc ", "; + ) targs; + fprintf oc ")\n"; + comment oc "check the return value"; + check oc 0 "%ret" ret; + fprintf oc "\tret 0\n"; + postlude oc; + () + + let callee oc ret args = + let narg = List.length args in + List.iteri (fun i arg -> + typedef oc (argname i) arg; + ) args; + typedef oc "ret" ret; + fprintf oc "\nexport function %s $f(" (ttype "ret" ret); + List.iteri (fun i arg -> + let a = argname i in + fprintf oc "%s %%%s" (ttype a arg) a; + if i <> narg-1 then + fprintf oc ", "; + ) args; + fprintf oc ") {\n"; + fprintf oc "@start\n"; + 
fprintf oc "\t%%failcode =l alloc4 4\n"; + List.iteri (fun i arg -> + comment oc ("checking argument " ^ (string_of_int (i+1))); + check oc (i+1) ("%" ^ argname i) arg; + ) args; + comment oc "define the return value"; + let rettmp = init oc ret in + fprintf oc "\tret %s\n" rettmp; + postlude oc; + () + +end + + +module type OUT = sig + val extension: string + val comment: out_channel -> string -> unit + val caller: out_channel -> testa -> testa list -> unit + val callee: out_channel -> testa -> testa list -> unit +end + +let _ = + let usage code = + Printf.eprintf "usage: abi.ml [-s SEED] DIR {c,ssa} {c,ssa}\n"; + exit code in + + let outmod = function + | "c" -> (module OutC : OUT) + | "ssa" -> (module OutIL: OUT) + | _ -> usage 1 in + + let seed, dir, mcaller, mcallee = + match Sys.argv with + | [| _; "-s"; seed; dir; caller; callee |] -> + let seed = + try Some (int_of_string seed) with + Failure _ -> usage 1 in + seed, dir, outmod caller, outmod callee + | [| _; dir; caller; callee |] -> + None, dir, outmod caller, outmod callee + | [| _; "-h" |] -> + usage 0 + | _ -> + usage 1 in + + let seed = Gen.init seed in + let tret = Gen.test () in + let targs = Gen.tests () in + let module OCaller = (val mcaller : OUT) in + let module OCallee = (val mcallee : OUT) in + let ocaller = open_out (dir ^ "/caller" ^ OCaller.extension) in + let ocallee = open_out (dir ^ "/callee" ^ OCallee.extension) in + OCaller.comment ocaller (Printf.sprintf "seed %d" seed); + OCallee.comment ocallee (Printf.sprintf "seed %d" seed); + OCaller.caller ocaller tret targs; + OCallee.callee ocallee tret targs; + () diff --git a/src/qbe/tools/cra.sh b/src/qbe/tools/cra.sh new file mode 100755 index 00000000..59882678 --- /dev/null +++ b/src/qbe/tools/cra.sh @@ -0,0 +1,38 @@ +#!/bin/sh + +DIR=`cd $(dirname "$0"); pwd` +QBE=$DIR/../qbe +BUGF=/tmp/bug.id +FIND=$1 +FIND=${FIND:-afl-find} + +if ! 
test -f $BUGF +then + echo 1 > $BUGF +fi + +while true +do + ID=`cat $BUGF` + + if test `ls $FIND/crashes/id* | wc -l` -lt $ID + then + rm -f bug.ssa + echo "All done!" + exit 0 + fi + + BUG=`ls $FIND/crashes/id* | sed -ne "${ID}{p;q}"` + + echo "*** Crash $ID" + cp $BUG bug.ssa + + $QBE bug.ssa > /dev/null + RET=$? + if test \( $RET -ne 0 \) -a \( $RET -ne 1 \) + then + exit 1 + fi + + expr $ID + 1 > $BUGF +done diff --git a/src/qbe/tools/lexh.c b/src/qbe/tools/lexh.c new file mode 100644 index 00000000..efc30fe2 --- /dev/null +++ b/src/qbe/tools/lexh.c @@ -0,0 +1,94 @@ +/*% c99 -O3 -Wall -o # % + */ +#include +#include +#include +#include +#include + +char *tok[] = { + + "add", "sub", "neg", "div", "rem", "udiv", "urem", "mul", + "and", "or", "xor", "sar", "shr", "shl", "stored", + "stores", "storel", "storew", "storeh", "storeb", + "load", "loadsw", "loaduw", "loadsh", "loaduh", + "loadsb", "loadub", "extsw", "extuw", "extsh", + "extuh", "extsb", "extub", "exts", "truncd", + "stosi", "dtosi", "stoui", "dtoui", "uwtof", + "ultof", "swtof", "sltof", "cast", "copy", + "alloc4", "alloc8", "alloc16", "culew", "cultw", + "cslew", "csltw", "csgtw", "csgew", "cugtw", + "cugew", "ceqw", "cnew", "culel", "cultl", "cslel", + "csltl", "csgtl", "csgel", "cugtl", "cugel", + "ceql", "cnel", "cles", "clts", "cgts", "cges", + "cnes", "ceqs", "cos", "cuos", "cled", "cltd", + "cgtd", "cged", "cned", "ceqd", "cod", "cuod", + "vaarg", "vastart", "...", "env", "dbgloc", + + "call", "phi", "jmp", "jnz", "ret", "hlt", "export", + "function", "type", "data", "section", "align", "dbgfile", + "blit", "l", "w", "sh", "uh", "h", "sb", "ub", "b", + "d", "s", "z", "loadw", "loadl", "loads", "loadd", + "alloc1", "alloc2", "thread", "common", + +}; +enum { + Ntok = sizeof tok / sizeof tok[0] +}; + +uint32_t th[Ntok]; + +uint32_t +hash(char *s) +{ + uint32_t h; + + h = 0; + for (; *s; ++s) + h = *s + 17*h; + return h; +} + +int +main() +{ + char *bmap; + uint32_t h, M, K; + int i, j; + + bmap = 
malloc(1u << 31); + + for (i=0; i> M; + if (bmap[h]) + break; + bmap[h] = 1; + } + if (i==Ntok) { + printf("found K=%d for M=%d\n", K, M); + exit(0); + } + K += 2; + } while (K != 1); + } +} diff --git a/src/qbe/tools/log2.c b/src/qbe/tools/log2.c new file mode 100644 index 00000000..05f97c9b --- /dev/null +++ b/src/qbe/tools/log2.c @@ -0,0 +1,64 @@ +#include +#include + +typedef unsigned long long ullong; + +char seen[64]; +ullong rbg = 0x1e0298f7a7e; + +int +bit() +{ + int bit; + + bit = rbg & 1; + rbg >>= 1; + return bit; +} + +int +search(ullong n, int b, ullong *out) +{ + int i, x; + ullong y, z; + + if (b == 64) { + *out = n; + return 1; + } + + x = 63 & ((n << (63 - b)) >> 58); + assert(!(x & 0) && x <= 62); + y = bit(); + + for (i=0; i<2; i++) { + z = x | (y << 5); + if (!seen[z]) { + seen[z] = (63-b)+1; + if (search(n | (y << b), b+1, out)) + return 1; + seen[z] = 0; + } + y ^= 1; + } + + return 0; +} + +int +main() +{ + ullong out; + int i; + + if (search(0, 0, &out)) { + printf("0x%llx\n", out); + for (i=0; i<64; i++) { + printf((i&7) == 0 ? "\t" : " "); + printf("%2d,", seen[i]-1); + if ((i&7) == 7) + printf("\n"); + } + } else + puts("not found"); +} diff --git a/src/qbe/tools/mgen/.gitignore b/src/qbe/tools/mgen/.gitignore new file mode 100644 index 00000000..1bb5834c --- /dev/null +++ b/src/qbe/tools/mgen/.gitignore @@ -0,0 +1,3 @@ +*.cm[iox] +*.o +mgen diff --git a/src/qbe/tools/mgen/.ocp-indent b/src/qbe/tools/mgen/.ocp-indent new file mode 100644 index 00000000..28b3b288 --- /dev/null +++ b/src/qbe/tools/mgen/.ocp-indent @@ -0,0 +1 @@ +match_clause=4 diff --git a/src/qbe/tools/mgen/Makefile b/src/qbe/tools/mgen/Makefile new file mode 100644 index 00000000..1b94436e --- /dev/null +++ b/src/qbe/tools/mgen/Makefile @@ -0,0 +1,16 @@ +BIN = mgen +SRC = \ + match.ml \ + fuzz.ml \ + cgen.ml \ + sexp.ml \ + test.ml \ + main.ml + +$(BIN): $(SRC) + ocamlopt -o $(BIN) -g str.cmxa $(SRC) + +clean: + rm -f *.cm? 
*.o $(BIN) + +.PHONY: clean diff --git a/src/qbe/tools/mgen/cgen.ml b/src/qbe/tools/mgen/cgen.ml new file mode 100644 index 00000000..297265cc --- /dev/null +++ b/src/qbe/tools/mgen/cgen.ml @@ -0,0 +1,420 @@ +open Match + +type options = + { pfx: string + ; static: bool + ; oc: out_channel } + +type side = L | R + +type id_pred = + | InBitSet of Int64.t + | Ge of int + | Eq of int + +and id_test = + | Pred of (side * id_pred) + | And of id_test * id_test + +type case_code = + | Table of ((int * int) * int) list + | IfThen of + { test: id_test + ; cif: case_code + ; cthen: case_code option } + | Return of int + +type case = + { swap: bool + ; code: case_code } + +let cgen_case tmp nstates map = + let cgen_test ids = + match ids with + | [id] -> Eq id + | _ -> + let min_id = + List.fold_left min max_int ids in + if List.length ids = nstates - min_id + then Ge min_id + else begin + assert (nstates <= 64); + InBitSet + (List.fold_left (fun bs id -> + Int64.logor bs + (Int64.shift_left 1L id)) + 0L ids) + end + in + let symmetric = + let inverse ((l, r), x) = ((r, l), x) in + setify map = setify (List.map inverse map) in + let map = + let ordered ((l, r), _) = r <= l in + if symmetric then + List.filter ordered map + else map + in + let exception BailToTable in + try + let st = + match setify (List.map snd map) with + | [st] -> st + | _ -> raise BailToTable + in + (* the operation considered can only + * generate a single state *) + let pairs = List.map fst map in + let ls, rs = List.split pairs in + let ls = setify ls and rs = setify rs in + if List.length ls > 1 && List.length rs > 1 then + raise BailToTable; + { swap = symmetric + ; code = + let pl = Pred (L, cgen_test ls) + and pr = Pred (R, cgen_test rs) in + IfThen + { test = And (pl, pr) + ; cif = Return st + ; cthen = Some (Return tmp) } } + with BailToTable -> + { swap = symmetric + ; code = Table map } + +let show_op (_cls, op) = + "O" ^ show_op_base op + +let indent oc i = + Printf.fprintf oc "%s" (String.sub 
"\t\t\t\t\t" 0 i) + +let emit_swap oc i = + let pf m = Printf.fprintf oc m in + let pfi n m = indent oc n; pf m in + pfi i "if (l < r)\n"; + pfi (i+1) "t = l, l = r, r = t;\n" + +let gen_tables oc tmp pfx nstates (op, c) = + let i = 1 in + let pf m = Printf.fprintf oc m in + let pfi n m = indent oc n; pf m in + let ntables = ref 0 in + (* we must follow the order in which + * we visit code in emit_case, or + * else ntables goes out of sync *) + let base = pfx ^ show_op op in + let swap = c.swap in + let rec gen c = + match c with + | Table map -> + let name = + if !ntables = 0 then base else + base ^ string_of_int !ntables + in + assert (nstates <= 256); + if swap then + let n = nstates * (nstates + 1) / 2 in + pfi i "static uchar %stbl[%d] = {\n" name n + else + pfi i "static uchar %stbl[%d][%d] = {\n" + name nstates nstates; + for l = 0 to nstates - 1 do + pfi (i+1) ""; + for r = 0 to nstates - 1 do + if not swap || r <= l then + begin + pf "%d" + (try List.assoc (l,r) map + with Not_found -> tmp); + pf ","; + end + done; + pf "\n"; + done; + pfi i "};\n" + | IfThen {cif; cthen} -> + gen cif; + Option.iter gen cthen + | Return _ -> () + in + gen c.code + +let emit_case oc pfx no_swap (op, c) = + let fpf = Printf.fprintf in + let pf m = fpf oc m in + let pfi n m = indent oc n; pf m in + let rec side oc = function + | L -> fpf oc "l" + | R -> fpf oc "r" + in + let pred oc (s, pred) = + match pred with + | InBitSet bs -> fpf oc "BIT(%a) & %#Lx" side s bs + | Eq id -> fpf oc "%a == %d" side s id + | Ge id -> fpf oc "%d <= %a" id side s + in + let base = pfx ^ show_op op in + let swap = c.swap in + let ntables = ref 0 in + let rec code i c = + match c with + | Return id -> pfi i "return %d;\n" id + | Table map -> + let name = + if !ntables = 0 then base else + base ^ string_of_int !ntables + in + incr ntables; + if swap then + pfi i "return %stbl[(l + l*l)/2 + r];\n" name + else pfi i "return %stbl[l][r];\n" name + | IfThen ({test = And (And (t1, t2), t3)} as r) -> + 
code i @@ IfThen + {r with test = And (t1, And (t2, t3))} + | IfThen {test = And (Pred p, t); cif; cthen} -> + pfi i "if (%a)\n" pred p; + code i (IfThen {test = t; cif; cthen}) + | IfThen {test = Pred p; cif; cthen} -> + pfi i "if (%a) {\n" pred p; + code (i+1) cif; + pfi i "}\n"; + Option.iter (code i) cthen + in + pfi 1 "case %s:\n" (show_op op); + if not no_swap && c.swap then + emit_swap oc 2; + code 2 c.code + +let emit_list + ?(limit=60) ?(cut_before_sep=false) + ~col ~indent:i ~sep ~f oc l = + let sl = String.length sep in + let rstripped_sep, rssl = + if sep.[sl - 1] = ' ' then + String.sub sep 0 (sl - 1), sl - 1 + else sep, sl + in + let lstripped_sep, lssl = + if sep.[0] = ' ' then + String.sub sep 1 (sl - 1), sl - 1 + else sep, sl + in + let rec line col acc = function + | [] -> (List.rev acc, []) + | s :: l -> + let col = col + sl + String.length s in + let no_space = + if cut_before_sep || l = [] then + col > limit + else + col + rssl > limit + in + if no_space then + (List.rev acc, s :: l) + else + line col (s :: acc) l + in + let rec go col l = + if l = [] then () else + let ll, l = line col [] l in + Printf.fprintf oc "%s" (String.concat sep ll); + if l <> [] && cut_before_sep then begin + Printf.fprintf oc "\n"; + indent oc i; + Printf.fprintf oc "%s" lstripped_sep; + go (8*i + lssl) l + end else if l <> [] then begin + Printf.fprintf oc "%s\n" rstripped_sep; + indent oc i; + go (8*i) l + end else () + in + go col (List.map f l) + +let emit_numberer opts n = + let pf m = Printf.fprintf opts.oc m in + let tmp = (atom_state n Tmp).id in + let con = (atom_state n AnyCon).id in + let nst = Array.length n.states in + let cases = + StateMap.by_ops n.statemap |> + List.map (fun (op, map) -> + (op, cgen_case tmp nst map)) + in + let all_swap = + List.for_all (fun (_, c) -> c.swap) cases in + (* opn() *) + if opts.static then pf "static "; + pf "int\n"; + pf "%sopn(int op, int l, int r)\n" opts.pfx; + pf "{\n"; + cases |> List.iter + (gen_tables opts.oc 
tmp opts.pfx nst); + if List.exists (fun (_, c) -> c.swap) cases then + pf "\tint t;\n\n"; + if all_swap then emit_swap opts.oc 1; + pf "\tswitch (op) {\n"; + cases |> List.iter + (emit_case opts.oc opts.pfx all_swap); + pf "\tdefault:\n"; + pf "\t\treturn %d;\n" tmp; + pf "\t}\n"; + pf "}\n\n"; + (* refn() *) + if opts.static then pf "static "; + pf "int\n"; + pf "%srefn(Ref r, Num *tn, Con *con)\n" opts.pfx; + pf "{\n"; + let cons = + List.filter_map (function + | (Con c, s) -> Some (c, s.id) + | _ -> None) + n.atoms + in + if cons <> [] then + pf "\tint64_t n;\n\n"; + pf "\tswitch (rtype(r)) {\n"; + pf "\tcase RTmp:\n"; + if tmp <> 0 then begin + assert + (List.exists (fun (_, s) -> + s.id = 0 + ) n.atoms && + (* no temp should ever get state 0 *) + List.for_all (fun (a, s) -> + s.id <> 0 || + match a with + | AnyCon | Con _ -> true + | _ -> false + ) n.atoms); + pf "\t\tif (!tn[r.val].n)\n"; + pf "\t\t\ttn[r.val].n = %d;\n" tmp; + end; + pf "\t\treturn tn[r.val].n;\n"; + pf "\tcase RCon:\n"; + if cons <> [] then begin + pf "\t\tif (con[r.val].type != CBits)\n"; + pf "\t\t\treturn %d;\n" con; + pf "\t\tn = con[r.val].bits.i;\n"; + cons |> inverse |> group_by_fst + |> List.iter (fun (id, cs) -> + pf "\t\tif ("; + emit_list ~cut_before_sep:true + ~col:20 ~indent:2 ~sep:" || " + ~f:(fun c -> "n == " ^ Int64.to_string c) + opts.oc cs; + pf ")\n"; + pf "\t\t\treturn %d;\n" id + ); + end; + pf "\t\treturn %d;\n" con; + pf "\tdefault:\n"; + pf "\t\treturn INT_MIN;\n"; + pf "\t}\n"; + pf "}\n\n"; + (* match[]: patterns per state *) + if opts.static then pf "static "; + pf "bits %smatch[%d] = {\n" opts.pfx nst; + n.states |> Array.iteri (fun sn s -> + let tops = + List.filter_map (function + | Top ("$" | "%") -> None + | Top r -> Some ("BIT(P" ^ r ^ ")") + | _ -> None) s.point |> setify + in + if tops <> [] then + pf "\t[%d] = %s,\n" + sn (String.concat " | " tops); + ); + pf "};\n\n" + +let var_id vars f = + List.mapi (fun i x -> (x, i)) vars |> + List.assoc f + +let 
compile_action vars act = + let pcs = Hashtbl.create 100 in + let rec gen pc (act: Action.t) = + try + [10 + Hashtbl.find pcs act.id] + with Not_found -> + let code = + match act.node with + | Action.Stop -> + [0] + | Action.Push (sym, k) -> + let c = if sym then 1 else 2 in + [c] @ gen (pc + 1) k + | Action.Set (v, {node = Action.Pop k; _}) + | Action.Set (v, ({node = Action.Stop; _} as k)) -> + let v = var_id vars v in + [3; v] @ gen (pc + 2) k + | Action.Set _ -> + (* for now, only atomic patterns can be + * tied to a variable, so Set must be + * followed by either Pop or Stop *) + assert false + | Action.Pop k -> + [4] @ gen (pc + 1) k + | Action.Switch cases -> + let cases = + inverse cases |> group_by_fst |> + List.sort (fun (_, cs1) (_, cs2) -> + let n1 = List.length cs1 + and n2 = List.length cs2 in + compare n2 n1) + in + (* the last case is the one with + * the max number of entries *) + let cases = List.rev (List.tl cases) + and last = fst (List.hd cases) in + let ncases = + List.fold_left (fun n (_, cs) -> + List.length cs + n) + 0 cases + in + let body_off = 2 + 2 * ncases + 1 in + let pc, tbl, body = + List.fold_left + (fun (pc, tbl, body) (a, cs) -> + let ofs = body_off + List.length body in + let case = gen pc a in + let pc = pc + List.length case in + let body = body @ case in + let tbl = + List.fold_left (fun tbl c -> + tbl @ [c; ofs] + ) tbl cs + in + (pc, tbl, body)) + (pc + body_off, [], []) + cases + in + let ofs = body_off + List.length body in + let tbl = tbl @ [ofs] in + assert (2 + List.length tbl = body_off); + [5; ncases] @ tbl @ body @ gen pc last + in + if act.node <> Action.Stop then + Hashtbl.replace pcs act.id pc; + code + in + gen 0 act + +let emit_matchers opts ms = + let pf m = Printf.fprintf opts.oc m in + if opts.static then pf "static "; + pf "uchar *%smatcher[] = {\n" opts.pfx; + List.iter (fun (vars, pname, m) -> + pf "\t[P%s] = (uchar[]){\n" pname; + pf "\t\t"; + let bytes = compile_action vars m in + emit_list + ~col:16 
~indent:2 ~sep:"," + ~f:string_of_int opts.oc bytes; + pf "\n"; + pf "\t},\n") + ms; + pf "};\n\n" + +let emit_c opts n = + emit_numberer opts n diff --git a/src/qbe/tools/mgen/fuzz.ml b/src/qbe/tools/mgen/fuzz.ml new file mode 100644 index 00000000..08212864 --- /dev/null +++ b/src/qbe/tools/mgen/fuzz.ml @@ -0,0 +1,413 @@ +(* fuzz the tables and matchers generated *) +open Match + +module Buffer: sig + type 'a t + val create: ?capacity:int -> unit -> 'a t + val reset: 'a t -> unit + val size: 'a t -> int + val get: 'a t -> int -> 'a + val set: 'a t -> int -> 'a -> unit + val push: 'a t -> 'a -> unit +end = struct + type 'a t = + { mutable size: int + ; mutable data: 'a array } + let mk_array n = Array.make n (Obj.magic 0) + let create ?(capacity = 10) () = + if capacity < 0 then invalid_arg "Buffer.make"; + {size = 0; data = mk_array capacity} + let reset b = b.size <- 0 + let size b = b.size + let get b n = + if n >= size b then invalid_arg "Buffer.get"; + b.data.(n) + let set b n x = + if n >= size b then invalid_arg "Buffer.set"; + b.data.(n) <- x + let push b x = + let cap = Array.length b.data in + if size b = cap then begin + let data = mk_array (2 * cap + 1) in + Array.blit b.data 0 data 0 cap; + b.data <- data + end; + let sz = size b in + b.size <- sz + 1; + set b sz x +end + +let binop_state n op s1 s2 = + let key = K (op, s1, s2) in + try StateMap.find key n.statemap + with Not_found -> atom_state n Tmp + +type id = int +type term_data = + | Binop of op * id * id + | Leaf of atomic_pattern +type term = + { id: id + ; data: term_data + ; state: p state } + +let pp_term fmt (ta, id) = + let fpf x = Format.fprintf fmt x in + let rec pp _fmt id = + match ta.(id).data with + | Leaf (Con c) -> fpf "%Ld" c + | Leaf AnyCon -> fpf "$%d" id + | Leaf Tmp -> fpf "%%%d" id + | Binop (op, id1, id2) -> + fpf "@[(%s@%d:%d @[%a@ %a@])@]" + (show_op op) id ta.(id).state.id + pp id1 pp id2 + in pp fmt id + +(* A term pool is a deduplicated set of term + * that maintains 
nodes numbering using the + * statemap passed at creation time *) +module TermPool = struct + type t = + { terms: term Buffer.t + ; hcons: (term_data, id) Hashtbl.t + ; numbr: numberer } + + let create numbr = + { terms = Buffer.create () + ; hcons = Hashtbl.create 100 + ; numbr } + let reset tp = + Buffer.reset tp.terms; + Hashtbl.clear tp.hcons + + let size tp = Buffer.size tp.terms + let term tp id = Buffer.get tp.terms id + + let mk_leaf tp atm = + let data = Leaf atm in + match Hashtbl.find tp.hcons data with + | id -> term tp id + | exception Not_found -> + let id = Buffer.size tp.terms in + let state = atom_state tp.numbr atm in + Buffer.push tp.terms {id; data; state}; + Hashtbl.add tp.hcons data id; + term tp id + let mk_binop tp op t1 t2 = + let data = Binop (op, t1.id, t2.id) in + match Hashtbl.find tp.hcons data with + | id -> term tp id + | exception Not_found -> + let id = Buffer.size tp.terms in + let state = + binop_state tp.numbr op t1.state t2.state + in + Buffer.push tp.terms {id; data; state}; + Hashtbl.add tp.hcons data id; + term tp id + + let rec add_pattern tp = function + | Bnr (op, p1, p2) -> + let t1 = add_pattern tp p1 in + let t2 = add_pattern tp p2 in + mk_binop tp op t1 t2 + | Atm atm -> mk_leaf tp atm + | Var (_, atm) -> add_pattern tp (Atm atm) + + let explode_term tp id = + let rec aux tms n id = + let t = term tp id in + match t.data with + | Leaf _ -> (n, {t with id = n} :: tms) + | Binop (op, id1, id2) -> + let n1, tms = aux tms n id1 in + let n = n1 + 1 in + let n2, tms = aux tms n id2 in + let n = n2 + 1 in + (n, { t with data = Binop (op, n1, n2) + ; id = n } :: tms) + in + let n, tms = aux [] 0 id in + Array.of_list (List.rev tms), n +end + +module R = Random + +(* uniform pick in a list *) +let list_pick l = + let rec aux n l x = + match l with + | [] -> x + | y :: l -> + if R.int (n + 1) = 0 then + aux (n + 1) l y + else + aux (n + 1) l x + in + match l with + | [] -> invalid_arg "list_pick" + | x :: l -> aux 1 l x + +let 
term_pick ~numbr = + let ops = + if numbr.ops = [] then + numbr.ops <- + (StateMap.fold (fun k _ ops -> + match k with + | K (op, _, _) -> op :: ops) + numbr.statemap [] |> setify); + numbr.ops + in + let rec gen depth = + (* exponential probability for leaves to + * avoid skewing towards shallow terms *) + let atm_prob = 0.75 ** float_of_int depth in + if R.float 1.0 <= atm_prob || ops = [] then + let atom, st = list_pick numbr.atoms in + (st, Atm atom) + else + let op = list_pick ops in + let s1, t1 = gen (depth - 1) in + let s2, t2 = gen (depth - 1) in + ( binop_state numbr op s1 s2 + , Bnr (op, t1, t2) ) + in fun ~depth -> gen depth + +exception FuzzError + +let rec pattern_depth = function + | Bnr (_, p1, p2) -> + 1 + max (pattern_depth p1) (pattern_depth p2) + | Atm _ -> 0 + | Var (_, atm) -> pattern_depth (Atm atm) + +let ( %% ) a b = + 1e2 *. float_of_int a /. float_of_int b + +let progress ?(width = 50) msg pct = + Format.eprintf "\x1b[2K\r%!"; + let progress_bar fmt = + let n = + let fwidth = float_of_int width in + 1 + int_of_float (pct *. fwidth /. 1e2) + in + Format.fprintf fmt " %s%s %.0f%%@?" 
+ (String.concat "" (List.init n (fun _ -> "▒"))) + (String.make (max 0 (width - n)) '-') + pct + in + Format.kfprintf progress_bar + Format.err_formatter msg + +let fuzz_numberer rules numbr = + (* pick twice the max pattern depth so we + * have a chance to find non-trivial numbers + * for the atomic patterns in the rules *) + let depth = + List.fold_left (fun depth r -> + max depth (pattern_depth r.pattern)) + 0 rules * 2 + in + (* fuzz until the term pool we are constructing + * is no longer growing fast enough; or we just + * went through sufficiently many iterations *) + let max_iter = 1_000_000 in + let low_insert_rate = 1e-2 in + let tp = TermPool.create numbr in + let rec loop new_stats i = + let (_, _, insert_rate) = new_stats in + if insert_rate <= low_insert_rate then () else + if i >= max_iter then () else + (* periodically update stats *) + let new_stats = + let (num, cnt, rate) = new_stats in + if num land 1023 = 0 then + let rate = + 0.5 *. (rate +. float_of_int cnt /. 1023.) + in + progress " insert_rate=%.1f%%" + (i %% max_iter) (rate *. 
1e2); + (num + 1, 0, rate) + else new_stats + in + (* create a term and check that its number is + * accurate wrt the rules *) + let st, term = term_pick ~numbr ~depth in + let state_matched = + List.filter_map (fun cu -> + match cu with + | Top ("$" | "%") -> None + | Top name -> Some name + | _ -> None) + st.point |> setify + in + let rule_matched = + List.filter_map (fun r -> + if pattern_match r.pattern term then + Some r.name + else None) + rules |> setify + in + if state_matched <> rule_matched then begin + let open Format in + let pp_str_list = + let pp_sep fmt () = fprintf fmt ",@ " in + pp_print_list ~pp_sep pp_print_string + in + eprintf "@.@[fuzz error for %s" + (show_pattern term); + eprintf "@ @[state matched: %a@]" + pp_str_list state_matched; + eprintf "@ @[rule matched: %a@]" + pp_str_list rule_matched; + eprintf "@]@."; + raise FuzzError; + end; + if state_matched = [] then + loop new_stats (i + 1) + else + (* add to the term pool *) + let old_size = TermPool.size tp in + let _ = TermPool.add_pattern tp term in + let new_stats = + let (num, cnt, rate) = new_stats in + if TermPool.size tp <> old_size then + (num + 1, cnt + 1, rate) + else + (num + 1, cnt, rate) + in + loop new_stats (i + 1) + in + loop (1, 0, 1.0) 0; + Format.eprintf + "@.@[ generated %.3fMiB of test terms@]@." + (float_of_int (Obj.reachable_words (Obj.repr tp)) + /. 128. /. 
1024.); + tp + +let rec run_matcher stk m (ta, id as t) = + let state id = ta.(id).state.id in + match m.Action.node with + | Action.Switch cases -> + let m = + try List.assoc (state id) cases + with Not_found -> failwith "no switch case" + in + run_matcher stk m t + | Action.Push (sym, m) -> + let l, r = + match ta.(id).data with + | Leaf _ -> failwith "push on leaf" + | Binop (_, l, r) -> (l, r) + in + if sym && state l > state r + then run_matcher (l :: stk) m (ta, r) + else run_matcher (r :: stk) m (ta, l) + | Action.Pop m -> begin + match stk with + | id :: stk -> run_matcher stk m (ta, id) + | [] -> failwith "pop on empty stack" + end + | Action.Set (v, m) -> + (v, id) :: run_matcher stk m t + | Action.Stop -> [] + +let rec term_match p (ta, id) = + let (|>>) x f = + match x with None -> None | Some x -> f x + in + let atom_match a = + match ta.(id).data with + | Leaf a' -> pattern_match (Atm a) (Atm a') + | Binop _ -> pattern_match (Atm a) (Atm Tmp) + in + match p with + | Var (v, a) when atom_match a -> + Some [(v, id)] + | Atm a when atom_match a -> Some [] + | (Atm _ | Var _) -> None + | Bnr (op, pl, pr) -> begin + match ta.(id).data with + | Binop (op', idl, idr) when op' = op -> + term_match pl (ta, idl) |>> fun l1 -> + term_match pr (ta, idr) |>> fun l2 -> + Some (l1 @ l2) + | _ -> None + end + +let test_matchers tp numbr rules = + let {statemap = sm; states = sa; _} = numbr in + let total = ref 0 in + let matchers = + let htbl = Hashtbl.create (Array.length sa) in + List.map (fun r -> (r.name, r.pattern)) rules |> + group_by_fst |> + List.iter (fun (r, ps) -> + total := !total + List.length ps; + let pm = (ps, lr_matcher sm sa rules r) in + sa |> Array.iter (fun s -> + if List.mem (Top r) s.point then + Hashtbl.add htbl s.id pm)); + htbl + in + let seen = Hashtbl.create !total in + for id = 0 to TermPool.size tp - 1 do + if id land 1023 = 0 || + id = TermPool.size tp - 1 then begin + progress + " coverage=%.1f%%" + (id %% TermPool.size tp) + 
(Hashtbl.length seen %% !total) + end; + let t = TermPool.explode_term tp id in + Hashtbl.find_all matchers + (TermPool.term tp id).state.id |> + List.iter (fun (ps, m) -> + let norm = List.fast_sort compare in + let ok = + match norm (run_matcher [] m t) with + | asn -> `Match (List.exists (fun p -> + match term_match p t with + | None -> false + | Some asn' -> + if asn = norm asn' then begin + Hashtbl.replace seen p (); + true + end else false) ps) + | exception e -> `RunFailure e + in + if ok <> `Match true then begin + let open Format in + let pp_asn fmt asn = + fprintf fmt "@["; + pp_print_list + ~pp_sep:(fun fmt () -> fprintf fmt ";@ ") + (fun fmt (v, d) -> + fprintf fmt "@[%s←%d@]" v d) + fmt asn; + fprintf fmt "@]" + in + eprintf "@.@[matcher error for"; + eprintf "@ @[%a@]" pp_term t; + begin match ok with + | `RunFailure e -> + eprintf "@ @[exception: %s@]" + (Printexc.to_string e) + | `Match (* false *) _ -> + let asn = run_matcher [] m t in + eprintf "@ @[assignment: %a@]" + pp_asn asn; + eprintf "@ @[could not match"; + List.iter (fun p -> + eprintf "@ + @[%s@]" + (show_pattern p)) ps; + eprintf "@]" + end; + eprintf "@]@."; + raise FuzzError + end) + done; + Format.eprintf "@." 
+ + diff --git a/src/qbe/tools/mgen/main.ml b/src/qbe/tools/mgen/main.ml new file mode 100644 index 00000000..fb49259e --- /dev/null +++ b/src/qbe/tools/mgen/main.ml @@ -0,0 +1,214 @@ +open Cgen +open Match + +let mgen ~verbose ~fuzz path lofs input oc = + let info ?(level = 1) fmt = + if level <= verbose then + Printf.eprintf fmt + else + Printf.ifprintf stdout fmt + in + + let rules = + match Sexp.(run_parser ppats) input with + | `Error (ps, err, loc) -> + Printf.eprintf "%s:%d:%d %s\n" + path (lofs + ps.Sexp.line) ps.Sexp.coln err; + Printf.eprintf "%s" loc; + exit 1 + | `Ok rules -> rules + in + + info "adding ac variants...%!"; + let nparsed = + List.fold_left + (fun npats (_, _, ps) -> + npats + List.length ps) + 0 rules + in + let varsmap = Hashtbl.create 10 in + let rules = + List.concat_map (fun (name, vars, patterns) -> + (try assert (Hashtbl.find varsmap name = vars) + with Not_found -> ()); + Hashtbl.replace varsmap name vars; + List.map + (fun pattern -> {name; vars; pattern}) + (List.concat_map ac_equiv patterns) + ) rules + in + info " %d -> %d patterns\n" + nparsed (List.length rules); + + let rnames = + setify (List.map (fun r -> r.name) rules) in + + info "generating match tables...%!"; + let sa, am, sm = generate_table rules in + let numbr = make_numberer sa am sm in + info " %d states, %d rules\n" + (Array.length sa) (StateMap.cardinal sm); + if verbose >= 2 then begin + info "-------------\nstates:\n"; + Array.iteri (fun i s -> + info " state %d: %s\n" + i (show_pattern s.seen)) sa; + info "-------------\nstatemap:\n"; + Test.print_sm stderr sm; + info "-------------\n"; + end; + + info "generating matchers...\n"; + let matchers = + List.map (fun rname -> + info "+ %s...%!" 
rname; + let m = lr_matcher sm sa rules rname in + let vars = Hashtbl.find varsmap rname in + info " %d nodes\n" (Action.size m); + info ~level:2 " -------------\n"; + info ~level:2 " automaton:\n"; + info ~level:2 "%s\n" + (Format.asprintf " @[%a@]" Action.pp m); + info ~level:2 " ----------\n"; + (vars, rname, m) + ) rnames + in + + if fuzz then begin + info ~level:0 "fuzzing statemap...\n"; + let tp = Fuzz.fuzz_numberer rules numbr in + info ~level:0 "testing %d patterns...\n" + (List.length rules); + Fuzz.test_matchers tp numbr rules + end; + + info "emitting C...\n"; + flush stderr; + + let cgopts = + { pfx = ""; static = true; oc = oc } in + emit_c cgopts numbr; + emit_matchers cgopts matchers; + + () + +let read_all ic = + let bufsz = 4096 in + let buf = Bytes.create bufsz in + let data = Buffer.create bufsz in + let read = ref 0 in + while + read := input ic buf 0 bufsz; + !read <> 0 + do + Buffer.add_subbytes data buf 0 !read + done; + Buffer.contents data + +let split_c src = + let begin_re, eoc_re, end_re = + let re = Str.regexp in + ( re "mgen generated code" + , re "\\*/" + , re "end of generated code" ) + in + let str_match regexp str = + try + let _: int = + Str.search_forward regexp str 0 + in true + with Not_found -> false + in + + let rec go st lofs pfx rules lines = + let line, lines = + match lines with + | [] -> + failwith ( + match st with + | `Prefix -> "could not find mgen section" + | `Rules -> "mgen rules not terminated" + | `Skip -> "mgen section not terminated" + ) + | l :: ls -> (l, ls) + in + match st with + | `Prefix -> + let pfx = line :: pfx in + if str_match begin_re line + then + let lofs = List.length pfx in + go `Rules lofs pfx rules lines + else go `Prefix 0 pfx rules lines + | `Rules -> + let pfx = line :: pfx in + if str_match eoc_re line + then go `Skip lofs pfx rules lines + else go `Rules lofs pfx (line :: rules) lines + | `Skip -> + if str_match end_re line then + let join = String.concat "\n" in + let pfx = join 
(List.rev pfx) ^ "\n\n" + and rules = join (List.rev rules) + and sfx = join (line :: lines) + in (lofs, pfx, rules, sfx) + else go `Skip lofs pfx rules lines + in + + let lines = String.split_on_char '\n' src in + go `Prefix 0 [] [] lines + +let () = + let usage_msg = + "mgen [--fuzz] [--verbose ] " in + + let fuzz_arg = ref false in + let verbose_arg = ref 0 in + let input_paths = ref [] in + + let anon_fun filename = + input_paths := filename :: !input_paths in + + let speclist = + [ ( "--fuzz", Arg.Set fuzz_arg + , " Fuzz tables and matchers" ) + ; ( "--verbose", Arg.Set_int verbose_arg + , " Set verbosity level" ) + ; ( "--", Arg.Rest_all (List.iter anon_fun) + , " Stop argument parsing" ) ] + in + Arg.parse speclist anon_fun usage_msg; + + let input_paths = !input_paths in + let verbose = !verbose_arg in + let fuzz = !fuzz_arg in + let input_path, input = + match input_paths with + | ["-"] -> ("-", read_all stdin) + | [path] -> (path, read_all (open_in path)) + | _ -> + Printf.eprintf + "%s: single input file expected\n" + Sys.argv.(0); + Arg.usage speclist usage_msg; exit 1 + in + let mgen = mgen ~verbose ~fuzz in + + if Str.last_chars input_path 2 <> ".c" + then mgen input_path 0 input stdout + else + let tmp_path = input_path ^ ".tmp" in + Fun.protect + ~finally:(fun () -> + try Sys.remove tmp_path with _ -> ()) + (fun () -> + let lofs, pfx, rules, sfx = split_c input in + let oc = open_out tmp_path in + output_string oc pfx; + mgen input_path lofs rules oc; + output_string oc sfx; + close_out oc; + Sys.rename tmp_path input_path; + ()); + + () diff --git a/src/qbe/tools/mgen/match.ml b/src/qbe/tools/mgen/match.ml new file mode 100644 index 00000000..9c02ca49 --- /dev/null +++ b/src/qbe/tools/mgen/match.ml @@ -0,0 +1,651 @@ +type cls = Kw | Kl | Ks | Kd +type op_base = + | Oadd + | Osub + | Omul + | Oor + | Oshl + | Oshr +type op = cls * op_base + +let op_bases = + [Oadd; Osub; Omul; Oor; Oshl; Oshr] + +let commutative = function + | (_, (Oadd | Omul | 
Oor)) -> true + | (_, _) -> false + +let associative = function + | (_, (Oadd | Omul | Oor)) -> true + | (_, _) -> false + +type atomic_pattern = + | Tmp + | AnyCon + | Con of int64 +(* Tmp < AnyCon < Con k *) + +type pattern = + | Bnr of op * pattern * pattern + | Atm of atomic_pattern + | Var of string * atomic_pattern + +let is_atomic = function + | (Atm _ | Var _) -> true + | _ -> false + +let show_op_base o = + match o with + | Oadd -> "add" + | Osub -> "sub" + | Omul -> "mul" + | Oor -> "or" + | Oshl -> "shl" + | Oshr -> "shr" + +let show_op (k, o) = + show_op_base o ^ + (match k with + | Kw -> "w" + | Kl -> "l" + | Ks -> "s" + | Kd -> "d") + +let rec show_pattern p = + match p with + | Atm Tmp -> "%" + | Atm AnyCon -> "$" + | Atm (Con n) -> Int64.to_string n + | Var (v, p) -> + show_pattern (Atm p) ^ "'" ^ v + | Bnr (o, pl, pr) -> + "(" ^ show_op o ^ + " " ^ show_pattern pl ^ + " " ^ show_pattern pr ^ ")" + +let get_atomic p = + match p with + | (Atm a | Var (_, a)) -> Some a + | _ -> None + +let rec pattern_match p w = + match p with + | Var (_, p) -> + pattern_match (Atm p) w + | Atm Tmp -> + begin match get_atomic w with + | Some (Con _ | AnyCon) -> false + | _ -> true + end + | Atm (Con _) -> w = p + | Atm (AnyCon) -> + not (pattern_match (Atm Tmp) w) + | Bnr (o, pl, pr) -> + begin match w with + | Bnr (o', wl, wr) -> + o' = o && + pattern_match pl wl && + pattern_match pr wr + | _ -> false + end + +type +'a cursor = (* a position inside a pattern *) + | Bnrl of op * 'a cursor * pattern + | Bnrr of op * pattern * 'a cursor + | Top of 'a + +let rec fold_cursor c p = + match c with + | Bnrl (o, c', p') -> fold_cursor c' (Bnr (o, p, p')) + | Bnrr (o, p', c') -> fold_cursor c' (Bnr (o, p', p)) + | Top _ -> p + +let peel p x = + let once out (p, c) = + match p with + | Var (_, p) -> (Atm p, c) :: out + | Atm _ -> (p, c) :: out + | Bnr (o, pl, pr) -> + (pl, Bnrl (o, c, pr)) :: + (pr, Bnrr (o, pl, c)) :: out + in + let rec go l = + let l' = List.fold_left once 
[] l in + if List.length l' = List.length l + then l' + else go l' + in go [(p, Top x)] + +let fold_pairs l1 l2 ini f = + let rec go acc = function + | [] -> acc + | a :: l1' -> + go (List.fold_left + (fun acc b -> f (a, b) acc) + acc l2) l1' + in go ini l1 + +let iter_pairs l f = + fold_pairs l l () (fun x () -> f x) + +let inverse l = + List.map (fun (a, b) -> (b, a)) l + +type 'a state = + { id: int + ; seen: pattern + ; point: ('a cursor) list } + +let rec binops side {point; _} = + List.filter_map (fun c -> + match c, side with + | Bnrl (o, c, r), `L -> Some ((o, c), r) + | Bnrr (o, l, c), `R -> Some ((o, c), l) + | _ -> None) + point + +let group_by_fst l = + List.fast_sort (fun (a, _) (b, _) -> + compare a b) l |> + List.fold_left (fun (oo, l, res) (o', c) -> + match oo with + | None -> (Some o', [c], []) + | Some o when o = o' -> (oo, c :: l, res) + | Some o -> (Some o', [c], (o, l) :: res)) + (None, [], []) |> + (function + | (None, _, _) -> [] + | (Some o, l, res) -> (o, l) :: res) + +let sort_uniq cmp l = + List.fast_sort cmp l |> + List.fold_left (fun (eo, l) e' -> + match eo with + | None -> (Some e', l) + | Some e when cmp e e' = 0 -> (eo, l) + | Some e -> (Some e', e :: l)) + (None, []) |> + (function + | (None, _) -> [] + | (Some e, l) -> List.rev (e :: l)) + +let setify l = + sort_uniq compare l + +let normalize (point: ('a cursor) list) = + setify point + +let next_binary tmp s1 s2 = + let pm w (_, p) = pattern_match p w in + let o1 = binops `L s1 |> + List.filter (pm s2.seen) |> + List.map fst in + let o2 = binops `R s2 |> + List.filter (pm s1.seen) |> + List.map fst in + List.map (fun (o, l) -> + o, + { id = -1 + ; seen = Bnr (o, s1.seen, s2.seen) + ; point = normalize (l @ tmp) }) + (group_by_fst (o1 @ o2)) + +type p = string + +module StateSet : sig + type t + val create: unit -> t + val add: t -> p state -> + [> `Added | `Found ] * p state + val iter: t -> (p state -> unit) -> unit + val elems: t -> (p state) list +end = struct + open 
Hashtbl.Make(struct + type t = p state + let equal s1 s2 = s1.point = s2.point + let hash s = Hashtbl.hash s.point + end) + type nonrec t = + { h: int t + ; mutable next_id: int } + let create () = + { h = create 500; next_id = 0 } + let add set s = + assert (s.point = normalize s.point); + try + let id = find set.h s in + `Found, {s with id} + with Not_found -> begin + let id = set.next_id in + set.next_id <- id + 1; + add set.h s id; + `Added, {s with id} + end + let iter set f = + let f s id = f {s with id} in + iter f set.h + let elems set = + let res = ref [] in + iter set (fun s -> res := s :: !res); + !res +end + +type table_key = + | K of op * p state * p state + +module StateMap = struct + include Map.Make(struct + type t = table_key + let compare ka kb = + match ka, kb with + | K (o, sl, sr), K (o', sl', sr') -> + compare (o, sl.id, sr.id) + (o', sl'.id, sr'.id) + end) + let invert n sm = + let rmap = Array.make n [] in + iter (fun k {id; _} -> + match k with + | K (o, sl, sr) -> + rmap.(id) <- + (o, (sl.id, sr.id)) :: rmap.(id) + ) sm; + Array.map group_by_fst rmap + let by_ops sm = + fold (fun tk s ops -> + match tk with + | K (op, l, r) -> + (op, ((l.id, r.id), s.id)) :: ops) + sm [] |> group_by_fst +end + +type rule = + { name: string + ; vars: string list + ; pattern: pattern } + +let generate_table rl = + let states = StateSet.create () in + let rl = + (* these atomic patterns must occur in + * rules so that we are able to number + * all possible refs *) + [ { name = "$"; vars = [] + ; pattern = Atm AnyCon } + ; { name = "%"; vars = [] + ; pattern = Atm Tmp } ] @ rl + in + (* initialize states *) + let ground = + List.concat_map + (fun r -> peel r.pattern r.name) rl |> + group_by_fst + in + let tmp = List.assoc (Atm Tmp) ground in + let con = List.assoc (Atm AnyCon) ground in + let atoms = ref [] in + let () = + List.iter (fun (seen, l) -> + let point = + if pattern_match (Atm Tmp) seen + then normalize (tmp @ l) + else normalize (con @ l) + in + 
let s = {id = -1; seen; point} in + let _, s = StateSet.add states s in + match get_atomic seen with + | Some atm -> atoms := (atm, s) :: !atoms + | None -> () + ) ground + in + (* setup loop state *) + let map = ref StateMap.empty in + let map_add k s' = + map := StateMap.add k s' !map + in + let flag = ref `Added in + let flagmerge = function + | `Added -> flag := `Added + | _ -> () + in + (* iterate until fixpoint *) + while !flag = `Added do + flag := `Stop; + let statel = StateSet.elems states in + iter_pairs statel (fun (sl, sr) -> + next_binary tmp sl sr |> + List.iter (fun (o, s') -> + let flag', s' = + StateSet.add states s' in + flagmerge flag'; + map_add (K (o, sl, sr)) s'; + )); + done; + let states = + StateSet.elems states |> + List.sort (fun s s' -> compare s.id s'.id) |> + Array.of_list + in + (states, !atoms, !map) + +let intersperse x l = + let rec go left right out = + let out = + (List.rev left @ [x] @ right) :: + out in + match right with + | x :: right' -> + go (x :: left) right' out + | [] -> out + in go [] l [] + +let rec permute = function + | [] -> [[]] + | x :: l -> + List.concat (List.map + (intersperse x) (permute l)) + +(* build all binary trees with ordered + * leaves l *) +let rec bins build l = + let rec go l r out = + match r with + | [] -> out + | x :: r' -> + go (l @ [x]) r' + (fold_pairs + (bins build l) + (bins build r) + out (fun (l, r) out -> + build l r :: out)) + in + match l with + | [] -> [] + | [x] -> [x] + | x :: l -> go [x] l [] + +let products l ini f = + let rec go acc la = function + | [] -> f (List.rev la) acc + | xs :: l -> + List.fold_left (fun acc x -> + go acc (x :: la) l) + acc xs + in go ini [] l + +(* combinatorial nuke... 
*) +let rec ac_equiv = + let rec alevel o = function + | Bnr (o', l, r) when o' = o -> + alevel o l @ alevel o r + | x -> [x] + in function + | Bnr (o, _, _) as p + when associative o -> + products + (List.map ac_equiv (alevel o p)) [] + (fun choice out -> + List.concat_map + (bins (fun l r -> Bnr (o, l, r))) + (if commutative o + then permute choice + else [choice]) @ out) + | Bnr (o, l, r) + when commutative o -> + fold_pairs + (ac_equiv l) (ac_equiv r) [] + (fun (l, r) out -> + Bnr (o, l, r) :: + Bnr (o, r, l) :: out) + | Bnr (o, l, r) -> + fold_pairs + (ac_equiv l) (ac_equiv r) [] + (fun (l, r) out -> + Bnr (o, l, r) :: out) + | x -> [x] + +module Action: sig + type node = + | Switch of (int * t) list + | Push of bool * t + | Pop of t + | Set of string * t + | Stop + and t = private + { id: int; node: node } + val equal: t -> t -> bool + val size: t -> int + val stop: t + val mk_push: sym:bool -> t -> t + val mk_pop: t -> t + val mk_set: string -> t -> t + val mk_switch: int list -> (int -> t) -> t + val pp: Format.formatter -> t -> unit +end = struct + type node = + | Switch of (int * t) list + | Push of bool * t + | Pop of t + | Set of string * t + | Stop + and t = + { id: int; node: node } + + let equal a a' = a.id = a'.id + let size a = + let seen = Hashtbl.create 10 in + let rec node_size = function + | Switch l -> + List.fold_left + (fun n (_, a) -> n + size a) 0 l + | (Push (_, a) | Pop a | Set (_, a)) -> + size a + | Stop -> 0 + and size {id; node} = + if Hashtbl.mem seen id + then 0 + else begin + Hashtbl.add seen id (); + 1 + node_size node + end + in + size a + + let mk = + let hcons = Hashtbl.create 100 in + let fresh = ref 0 in + fun node -> + let id = + try Hashtbl.find hcons node + with Not_found -> + let id = !fresh in + Hashtbl.add hcons node id; + fresh := id + 1; + id + in + {id; node} + let stop = mk Stop + let mk_push ~sym a = mk (Push (sym, a)) + let mk_pop a = + match a.node with + | Stop -> a + | _ -> mk (Pop a) + let mk_set v a = mk 
(Set (v, a)) + let mk_switch ids f = + match List.map f ids with + | [] -> failwith "empty switch"; + | c :: cs as cases -> + if List.for_all (equal c) cs then c + else + let cases = List.combine ids cases in + mk (Switch cases) + + open Format + let rec pp_node fmt = function + | Switch l -> + fprintf fmt "@[@[switch{"; + let pp_case (c, a) = + let pp_sep fmt () = fprintf fmt "," in + fprintf fmt "@,@[<2>→%a:@ @[%a@]@]" + (pp_print_list ~pp_sep pp_print_int) + c pp a + in + inverse l |> group_by_fst |> inverse |> + List.iter pp_case; + fprintf fmt "@]@,}@]" + | Push (true, a) -> fprintf fmt "pushsym@ %a" pp a + | Push (false, a) -> fprintf fmt "push@ %a" pp a + | Pop a -> fprintf fmt "pop@ %a" pp a + | Set (v, a) -> fprintf fmt "set(%s)@ %a" v pp a + | Stop -> fprintf fmt "•" + and pp fmt a = pp_node fmt a.node +end + +(* a state is commutative if (a op b) enters + * it iff (b op a) enters it as well *) +let symmetric rmap id = + List.for_all (fun (_, l) -> + let l1, l2 = + List.filter (fun (a, b) -> a <> b) l |> + List.partition (fun (a, b) -> a < b) + in + setify l1 = setify (inverse l2)) + rmap.(id) + +(* left-to-right matching of a set of patterns; + * may raise if there is no lr matcher for the + * input rule *) +let lr_matcher statemap states rules name = + let rmap = + let nstates = Array.length states in + StateMap.invert nstates statemap + in + let exception Stuck in + (* the list of ids represents a class of terms + * whose root ends up being labelled with one + * such id; the gen function generates a matcher + * that will, given any such term, assign values + * for the Var nodes of one pattern in pats *) + let rec gen + : 'a. 
int list -> (pattern * 'a) list + -> (int -> (pattern * 'a) list -> Action.t) + -> Action.t + = fun ids pats k -> + Action.mk_switch (setify ids) @@ fun id_top -> + let sym = symmetric rmap id_top in + let id_ops = + if sym then + let ordered (a, b) = a <= b in + List.map (fun (o, l) -> + (o, List.filter ordered l)) + rmap.(id_top) + else rmap.(id_top) + in + (* consider only the patterns that are + * compatible with the current id *) + let atm_pats, bin_pats = + List.filter (function + | Bnr (o, _, _), _ -> + List.exists + (fun (o', _) -> o' = o) + id_ops + | _ -> true) pats |> + List.partition + (fun (pat, _) -> is_atomic pat) + in + try + if bin_pats = [] then raise Stuck; + let pats_l = + List.map (function + | (Bnr (o, l, r), x) -> + (l, (o, x, r)) + | _ -> assert false) + bin_pats + and pats_r = + List.map (fun (l, (o, x, r)) -> + (r, (o, l, x))) + and patstop = + List.map (fun (r, (o, l, x)) -> + (Bnr (o, l, r), x)) + in + let id_pairs = List.concat_map snd id_ops in + let ids_l = List.map fst id_pairs + and ids_r id_left = + List.filter_map (fun (l, r) -> + if l = id_left then Some r else None) + id_pairs + in + (* match the left arm *) + Action.mk_push ~sym + (gen ids_l pats_l + @@ fun lid pats -> + (* then the right arm, considering + * only the remaining possible + * patterns and knowing that the + * left arm was numbered 'lid' *) + Action.mk_pop + (gen (ids_r lid) (pats_r pats) + @@ fun _rid pats -> + (* continue with the parent *) + k id_top (patstop pats))) + with Stuck -> + let atm_pats = + let seen = states.(id_top).seen in + List.filter (fun (pat, _) -> + pattern_match pat seen) atm_pats + in + if atm_pats = [] then raise Stuck else + let vars = + List.filter_map (function + | (Var (v, _), _) -> Some v + | _ -> None) atm_pats |> setify + in + match vars with + | [] -> k id_top atm_pats + | [v] -> Action.mk_set v (k id_top atm_pats) + | _ -> failwith "ambiguous var match" + in + (* generate a matcher for the rule *) + let ids_top = + Array.to_list 
states |> + List.filter_map (fun {id; point = p; _} -> + if List.exists ((=) (Top name)) p then + Some id + else None) + in + let rec filter_dups pats = + match pats with + | p :: pats -> + if List.exists (pattern_match p) pats + then filter_dups pats + else p :: filter_dups pats + | [] -> [] + in + let pats_top = + List.filter_map (fun r -> + if r.name = name then + Some r.pattern + else None) rules |> + filter_dups |> + List.map (fun p -> (p, ())) + in + gen ids_top pats_top (fun _ pats -> + assert (pats <> []); + Action.stop) + +type numberer = + { atoms: (atomic_pattern * p state) list + ; statemap: p state StateMap.t + ; states: p state array + ; mutable ops: op list + (* memoizes the list of possible operations + * according to the statemap *) } + +let make_numberer sa am sm = + { atoms = am + ; states = sa + ; statemap = sm + ; ops = [] } + +let atom_state n atm = + List.assoc atm n.atoms diff --git a/src/qbe/tools/mgen/sexp.ml b/src/qbe/tools/mgen/sexp.ml new file mode 100644 index 00000000..624619ed --- /dev/null +++ b/src/qbe/tools/mgen/sexp.ml @@ -0,0 +1,292 @@ +type pstate = + { data: string + ; line: int + ; coln: int + ; indx: int } + +type perror = + { error: string + ; ps: pstate } + +exception ParseError of perror + +type 'a parser = + { fn: 'r. 
pstate -> ('a -> pstate -> 'r) -> 'r } + +let update_pos ps beg fin = + let l, c = (ref ps.line, ref ps.coln) in + for i = beg to fin - 1 do + if ps.data.[i] = '\n' then + (incr l; c := 0) + else + incr c + done; + { ps with line = !l; coln = !c } + +let pret (type a) (x: a): a parser = + let fn ps k = k x ps in { fn } + +let pfail error: 'a parser = + let fn ps _ = raise (ParseError {error; ps}) + in { fn } + +let por: 'a parser -> 'a parser -> 'a parser = + fun p1 p2 -> + let fn ps k = + try p1.fn ps k with ParseError e1 -> + try p2.fn ps k with ParseError e2 -> + if e1.ps.indx > e2.ps.indx then + raise (ParseError e1) + else + raise (ParseError e2) + in { fn } + +let pbind: 'a parser -> ('a -> 'b parser) -> 'b parser = + fun p1 p2 -> + let fn ps k = + p1.fn ps (fun x ps -> (p2 x).fn ps k) + in { fn } + +(* handy for recursive rules *) +let papp p x = pbind (pret x) p + +let psnd: 'a parser -> 'b parser -> 'b parser = + fun p1 p2 -> pbind p1 (fun _x -> p2) + +let pfst: 'a parser -> 'b parser -> 'a parser = + fun p1 p2 -> pbind p1 (fun x -> psnd p2 (pret x)) + +module Infix = struct + let ( let* ) = pbind + let ( ||| ) = por + let ( |<< ) = pfst + let ( |>> ) = psnd +end + +open Infix + +let pre: ?what:string -> string -> string parser = + fun ?what re -> + let what = + match what with + | None -> Printf.sprintf "%S" re + | Some what -> what + and re = Str.regexp re in + let fn ps k = + if not (Str.string_match re ps.data ps.indx) then + (let error = + Printf.sprintf "expected to match %s" what in + raise (ParseError {error; ps})); + let ps = + let indx = Str.match_end () in + { (update_pos ps ps.indx indx) with indx } + in + k (Str.matched_string ps.data) ps + in { fn } + +let peoi: unit parser = + let fn ps k = + if ps.indx <> String.length ps.data then + raise (ParseError + { error = "expected end of input"; ps }); + k () ps + in { fn } + +let pws = pre "[ \r\n\t*]*" +let pws1 = pre "[ \r\n\t*]+" + +let pthen p1 p2 = + let* x1 = p1 in + let* x2 = p2 in + pret 
(x1, x2) + +let rec plist_tail: 'a parser -> ('a list) parser = + fun pitem -> + (pws |>> pre ")" |>> pret []) ||| + (let* itm = pitem in + let* itms = plist_tail pitem in + pret (itm :: itms)) + +let plist pitem = + pws |>> pre ~what:"a list" "(" + |>> plist_tail pitem + +let plist1p p1 pitem = + pws |>> pre ~what:"a list" "(" + |>> pthen p1 (plist_tail pitem) + +let ppair p1 p2 = + pws |>> pre ~what:"a pair" "(" + |>> pthen p1 p2 |<< pws |<< pre ")" + +let run_parser p s = + let ps = + {data = s; line = 1; coln = 0; indx = 0} in + try `Ok (p.fn ps (fun res _ps -> res)) + with ParseError e -> + let rec bol i = + if i = 0 then i else + if i < String.length s && s.[i] = '\n' + then i+1 (* XXX BUG *) + else bol (i-1) + in + let rec eol i = + if i = String.length s then i else + if s.[i] = '\n' then i else + eol (i+1) + in + let bol = bol e.ps.indx in + let eol = eol e.ps.indx in + (* + Printf.eprintf "bol:%d eol:%d indx:%d len:%d\n" + bol eol e.ps.indx (String.length s); (* XXX debug *) + *) + let lines = + String.split_on_char '\n' + (String.sub s bol (eol - bol)) + in + let nl = List.length lines in + let caret = ref (e.ps.indx - bol) in + let msg = ref [] in + let pfx = " > " in + lines |> List.iteri (fun ln l -> + if ln <> nl - 1 || l <> "" then begin + let ll = String.length l + 1 in + msg := (pfx ^ l ^ "\n") :: !msg; + if !caret <= ll then begin + let pad = String.make !caret ' ' in + msg := (pfx ^ pad ^ "^\n") :: !msg; + end; + caret := !caret - ll; + end; + ); + `Error + ( e.ps, e.error + , String.concat "" (List.rev !msg) ) + +(* ---------------------------------------- *) +(* pattern parsing *) +(* ---------------------------------------- *) +(* Example syntax: + + (with-vars (a b c d) + (patterns + (ob (add (tmp a) (con d))) + (bsm (add (tmp b) (mul (tmp m) (con 2 4 8)))) )) + *) +open Match + +let pint64 = + let* s = pre "[-]?[0-9_]+" in + pret (Int64.of_string s) + +let pid = + pre ~what:"an identifer" + "[a-zA-Z][a-zA-Z0-9_]*" + +let pop_base = + let 
sob, obs = show_op_base, op_bases in + let* s = pre ~what:"an operator" + (String.concat "\\|" (List.map sob obs)) + in pret (List.find (fun o -> s = sob o) obs) + +let pop = let* ob = pop_base in pret (Kl, ob) + +let rec ppat vs = + let pcons_tail = + let* cs = plist_tail (pws1 |>> pint64) in + match cs with + | [] -> pret [AnyCon] + | _ -> pret (List.map (fun c -> Con c) cs) + in + let pvar = + let* id = pid in + if not (List.mem id vs) then + pfail ("unbound variable: " ^ id) + else + pret id + in + pws |>> ( + ( let* c = pint64 in pret [Atm (Con c)] ) + ||| + ( pre "(con)" |>> pret [Atm AnyCon] ) ||| + ( let* cs = pre "(con" |>> pcons_tail in + pret (List.map (fun c -> Atm c) cs) ) ||| + ( let* v = pre "(con" |>> pws1 |>> pvar in + let* cs = pcons_tail in + pret (List.map (fun c -> Var (v, c)) cs) ) + ||| + ( pre "(tmp)" |>> pret [Atm Tmp] ) ||| + ( let* v = pre "(tmp" |>> pws1 |>> pvar in + pws |>> pre ")" |>> pret [Var (v, Tmp)] ) + ||| + ( let* (op, rands) = + plist1p (pws |>> pop) (papp ppat vs) in + let nrands = List.length rands in + if nrands < 2 then + pfail ( "binary op requires at least" + ^ " two arguments" ) + else + let mk x y = Bnr (op, x, y) in + pret + (products rands [] + (fun rands pats -> + (* construct a left-heavy tree *) + let r0 = List.hd rands in + let rs = List.tl rands in + List.fold_left mk r0 rs :: pats)) ) + ) + +let pwith_vars ?(vs = []) p = + ( let* vs = + pws |>> pre "(with-vars" |>> pws |>> + plist (pws |>> pid) + in pws |>> p vs |<< pws |<< pre ")" ) + ||| p vs + +let ppats = + pwith_vars @@ fun vs -> + pre "(patterns" |>> plist_tail + (pwith_vars ~vs @@ fun vs -> + let* n, ps = ppair pid (ppat vs) in + pret (n, vs, ps)) + +(* ---------------------------------------- *) +(* tests *) +(* ---------------------------------------- *) + +let () = + if false then + let show_patterns ps = + "[" ^ String.concat "; " + (List.map show_pattern ps) ^ "]" + in + let pat s = + Printf.printf "parse %s = " s; + let vars = + [ "foobar"; "a"; 
"b"; "d" + ; "m"; "s"; "x" ] + in + match run_parser (ppat vars) s with + | `Ok p -> + Printf.printf "%s\n" (show_patterns p) + | `Error (_, e, _) -> + Printf.printf "ERROR: %s\n" e + in + pat "42"; + pat "(tmp)"; + pat "(tmp foobar)"; + pat "(con)"; + pat "(con 1 2 3)"; + pat "(con x 1 2 3)"; + pat "(add 1 2)"; + pat "(add 1 2 3 4)"; + pat "(sub 1 2)"; + pat "(sub 1 2 3)"; + pat "(tmp unbound_var)"; + pat "(add 0)"; + pat "(add 1 (add 2 3))"; + pat "(add (tmp a) (con d))"; + pat "(add (tmp b) (mul (tmp m) (con s 2 4 8)))"; + pat "(add (con 1 2) (con 3 4))"; + () diff --git a/src/qbe/tools/mgen/test.ml b/src/qbe/tools/mgen/test.ml new file mode 100644 index 00000000..9ef40b9d --- /dev/null +++ b/src/qbe/tools/mgen/test.ml @@ -0,0 +1,134 @@ +open Match +open Fuzz +open Cgen + +(* unit tests *) + +let test_pattern_match = + let pm = pattern_match + and nm = fun x y -> not (pattern_match x y) in + begin + assert (nm (Atm Tmp) (Atm (Con 42L))); + assert (pm (Atm AnyCon) (Atm (Con 42L))); + assert (nm (Atm (Con 42L)) (Atm AnyCon)); + assert (nm (Atm (Con 42L)) (Atm Tmp)); + end + +let test_peel = + let o = Kw, Oadd in + let p = Bnr (o, Bnr (o, Atm Tmp, Atm Tmp), + Atm (Con 42L)) in + let l = peel p () in + let () = assert (List.length l = 3) in + let atomic_p (p, _) = + match p with Atm _ -> true | _ -> false in + let () = assert (List.for_all atomic_p l) in + let l = List.map (fun (p, c) -> fold_cursor c p) l in + let () = assert (List.for_all ((=) p) l) in + () + +let test_fold_pairs = + let l = [1; 2; 3; 4; 5] in + let p = fold_pairs l l [] (fun a b -> a :: b) in + let () = assert (List.length p = 25) in + let p = sort_uniq compare p in + let () = assert (List.length p = 25) in + () + +(* test pattern & state *) + +let print_sm oc = + StateMap.iter (fun k s' -> + match k with + | K (o, sl, sr) -> + let top = + List.fold_left (fun top c -> + match c with + | Top r -> top ^ " " ^ r + | _ -> top) "" s'.point + in + Printf.fprintf oc + " (%s %d %d) -> %d%s\n" + (show_op 
o) + sl.id sr.id s'.id top) + +let rules = + let oa = Kl, Oadd in + let om = Kl, Omul in + let va = Var ("a", Tmp) + and vb = Var ("b", Tmp) + and vc = Var ("c", Tmp) + and vs = Var ("s", Tmp) in + let vars = ["a"; "b"; "c"; "s"] in + let rule name pattern = + List.map + (fun pattern -> {name; vars; pattern}) + (ac_equiv pattern) + in + match `X64Addr with + (* ------------------------------- *) + | `X64Addr -> + (* o + b *) + rule "ob" (Bnr (oa, Atm Tmp, Atm AnyCon)) + @ (* b + s * m *) + rule "bsm" (Bnr (oa, vb, Bnr (om, Var ("m", Con 2L), vs))) + @ + rule "bsm" (Bnr (oa, vb, Bnr (om, Var ("m", Con 4L), vs))) + @ + rule "bsm" (Bnr (oa, vb, Bnr (om, Var ("m", Con 8L), vs))) + @ (* b + s *) + rule "bs1" (Bnr (oa, vb, vs)) + @ (* o + s * m *) + (* rule "osm" (Bnr (oa, Atm AnyCon, Bnr (om, Atm (Con 4L), Atm Tmp))) *) [] + @ (* o + b + s *) + rule "obs1" (Bnr (oa, Bnr (oa, Var ("o", AnyCon), vb), vs)) + @ (* o + b + s * m *) + rule "obsm" (Bnr (oa, Bnr (oa, Var ("o", AnyCon), vb), + Bnr (om, Var ("m", Con 2L), vs))) + @ + rule "obsm" (Bnr (oa, Bnr (oa, Var ("o", AnyCon), vb), + Bnr (om, Var ("m", Con 4L), vs))) + @ + rule "obsm" (Bnr (oa, Bnr (oa, Var ("o", AnyCon), vb), + Bnr (om, Var ("m", Con 8L), vs))) + (* ------------------------------- *) + | `Add3 -> + [ { name = "add" + ; vars = [] + ; pattern = Bnr (oa, va, Bnr (oa, vb, vc)) } ] @ + [ { name = "add" + ; vars = [] + ; pattern = Bnr (oa, Bnr (oa, va, vb), vc) } ] + +(* + +let sa, am, sm = generate_table rules +let () = + Array.iteri (fun i s -> + Format.printf "@[state %d: %s@]@." + i (show_pattern s.seen)) + sa +let () = print_sm stdout sm; flush stdout + +let matcher = lr_matcher sm sa rules "obsm" (* XXX *) +let () = Format.printf "@[%a@]@." Action.pp matcher +let () = Format.printf "@[matcher size: %d@]@." 
(Action.size matcher) + +let numbr = make_numberer sa am sm + +let () = + let opts = { pfx = "" + ; static = true + ; oc = stdout } in + emit_c opts numbr; + emit_matchers opts + [ ( ["b"; "o"; "s"; "m"] + , "obsm" + , matcher ) ] + +(* +let tp = fuzz_numberer rules numbr +let () = test_matchers tp numbr rules +*) + +*) diff --git a/src/qbe/tools/pmov.c b/src/qbe/tools/pmov.c new file mode 100644 index 00000000..ffc38eaf --- /dev/null +++ b/src/qbe/tools/pmov.c @@ -0,0 +1,262 @@ +/*% rm -f rega.o main.o && cc -g -std=c99 -Wall -DTEST_PMOV -o pmov % *.o + * + * This is a test framwork for the dopm() function + * in rega.c, use it when you want to modify it or + * all the parallel move functions. + * + * You might need to decrease NIReg to see it + * terminate, I used NIReg == 7 at most. + */ +#include +#include +#include + +static void assert_test(char *, int), fail(void), iexec(int *); + +#include "../../rega.c" + +static void bsinit_(BSet *, uint); + +static RMap mbeg; +static Ins ins[NIReg], *ip; +static Blk dummyb = { .ins = ins }; + +int +main() +{ + Ins *i1; + unsigned long long tm, rm, cnt; + RMap mend; + int reg[NIReg], val[NIReg+1]; + int t, i, r, nr; + + tmp = (Tmp[Tmp0+NIReg]){{{0}}}; + for (t=0; t= Tmp0) { + tmp[t].cls = Kw; + tmp[t].hint.r = -1; + tmp[t].hint.m = 0; + tmp[t].slot = -1; + sprintf(tmp[t].name, "tmp%d", t-Tmp0+1); + } + + bsinit_(mbeg.b, Tmp0+NIReg); + bsinit_(mend.b, Tmp0+NIReg); + cnt = 0; + for (tm = 0; tm < 1ull << (2*NIReg); tm++) { + mbeg.n = 0; + bszero(mbeg.b); + ip = ins; + + /* find what temporaries are in copy and + * wether or not they are in register + */ + for (t=0; t> (2*t)) & 3) { + case 0: + /* not in copy, not in reg */ + break; + case 1: + /* not in copy, in reg */ + radd(&mbeg, Tmp0+t, t+1); + break; + case 2: + /* in copy, not in reg */ + *ip++ = (Ins){OCopy, Kw, TMP(Tmp0+t), {R, R}}; + break; + case 3: + /* in copy, in reg */ + *ip++ = (Ins){OCopy, Kw, TMP(Tmp0+t), {R, R}}; + radd(&mbeg, Tmp0+t, t+1); + break; + } + + 
if (ip == ins) + /* cancel if the parallel move + * is empty + */ + goto Nxt; + + /* find registers for temporaries + * in mbeg + */ + nr = ip - ins; + rm = (1ull << (nr+1)) - 1; + for (i=0; iarg[0] = TMP(reg[i]); + + /* compile the parallel move + */ + rcopy(&mend, &mbeg); + dopm(&dummyb, ip-1, &mend); + cnt++; + + /* check that mend contain mappings for + * source registers and does not map any + * assigned temporary, then check that + * all temporaries in mend are mapped in + * mbeg and not used in the copy + */ + for (i1=ins; i1arg[0].val; + assert(rfree(&mend, r) == r); + t = i1->to.val; + assert(!bshas(mend.b, t)); + } + for (i=0; i> (2*t)) & 3) == 1); + } + + /* execute the code generated and check + * that all assigned temporaries got their + * value, and that all live variables's + * content got preserved + */ + for (i=1; i<=NIReg; i++) + val[i] = i; + iexec(val); + for (i1=ins; i1to.val; + r = rfind(&mbeg, t); + if (r != -1) + assert(val[r] == i1->arg[0].val); + } + for (i=0; i 0 && \ + r.val <= NIReg + +static void +iexec(int val[]) +{ + Ins *i; + int t; + + for (i=insb; iop) { + default: + assert(!"iexec: missing case\n"); + exit(1); + case OSwap: + assert(validr(i->arg[0])); + assert(validr(i->arg[1])); + t = val[i->arg[0].val]; + val[i->arg[0].val] = val[i->arg[1].val]; + val[i->arg[1].val] = t; + break; + case OCopy: + assert(validr(i->to)); + assert(validr(i->arg[0])); + val[i->to.val] = val[i->arg[0].val]; + break; + } +} + + +/* failure diagnostics */ + +static int re; + +static void +replay() +{ + RMap mend; + + re = 1; + bsinit_(mend.b, Tmp0+NIReg); + rcopy(&mend, &mbeg); + dopm(&dummyb, ip-1, &mend); +} + +static void +fail() +{ + Ins *i1; + int i; + + printf("\nIn registers: "); + for (i=0; ito.val].name, + i1->arg[0].val); + replay(); + abort(); +} + +static void +assert_test(char *s, int x) +{ + if (x) + return; + if (re) + abort(); + printf("!assertion failure: %s\n", s); + fail(); +} + +static void +bsinit_(BSet *bs, uint n) +{ + n = (n + 
NBit-1) / NBit; + bs->nt = n; + bs->t = emalloc(n * sizeof bs->t[0]); +} + +/* symbols required by the linker */ +char debug['Z'+1]; diff --git a/src/qbe/tools/test.sh b/src/qbe/tools/test.sh new file mode 100755 index 00000000..0df297fd --- /dev/null +++ b/src/qbe/tools/test.sh @@ -0,0 +1,267 @@ +#!/bin/sh + +dir=`dirname "$0"` +if [ -z "${bin:-}" ]; then + bin=$dir/../qbe +fi +if [ -z "${binref:-}" ]; then + binref=${bin}.ref +fi + +tmp=/tmp/qbe.zzzz + +drv=$tmp.c +asm=$tmp.s +asmref=$tmp.ref.s +exe=$tmp.exe +out=$tmp.out + +qemu_not_needed() { + "$@" +} + +cc= +find_cc_and_qemu() { + if [ -n "$cc" ]; then + return + fi + target="$1" + candidate_cc="$2" + if $candidate_cc -v >/dev/null 2>&1; then + cc=$candidate_cc + echo "cc: $cc" + + if [ "$target" = "$(uname -m)" ] + then + qemu=qemu_not_needed + echo "qemu: not needed, testing native architecture" + else + qemu="$3" + if $qemu -version >/dev/null 2>&1 + then + sysroot=$($candidate_cc -print-sysroot) + if [ -n "$sysroot" ]; then + qemu="$qemu -L $sysroot" + fi + echo "qemu: $qemu" + elif $qemu --version >/dev/null 2>&1 + then + # wine + : + else + qemu= + echo "qemu: not found" + fi + fi + echo + + fi +} + +init() { + case "$TARGET" in + arm64) + for p in aarch64-linux-musl aarch64-linux-gnu + do + find_cc_and_qemu aarch64 "$p-gcc -no-pie -static" "qemu-aarch64" + done + if test -z "$cc" -o -z "$qemu" + then + echo "Cannot find arm64 compiler or qemu." + exit 77 + fi + bin="$bin -t arm64" + ;; + rv64) + for p in riscv64-linux-musl riscv64-linux-gnu + do + find_cc_and_qemu riscv64 "$p-gcc -no-pie -static" "qemu-riscv64" + done + if test -z "$cc" -o -z "$qemu" + then + echo "Cannot find riscv64 compiler or qemu." + exit 77 + fi + bin="$bin -t rv64" + ;; + x86_64) + for p in x86_64-linux-musl x86_64-linux-gnu + do + find_cc_and_qemu x86_64 "$p-gcc -no-pie -static" "qemu-x86_64" + done + if test -z "$cc" -o -z "$qemu" + then + echo "Cannot find x86_64 compiler or qemu." 
+ exit 77 + fi + bin="$bin -t amd64_sysv" + ;; + amd64_win) + for p in x86_64-w64-mingw32 + do + find_cc_and_qemu x86_64-w64 "$p-gcc -static" "wine" + done + if test -z "$cc" + then + echo "Cannot find windows compiler or wine." + exit 1 + fi + export WINEDEBUG=-all + bin="$bin -t amd64_win" + ;; + "") + case `uname` in + *Darwin*) + cc="cc" + ;; + *OpenBSD*) + cc="cc -nopie -lpthread" + ;; + *FreeBSD*) + cc="cc -lpthread" + ;; + *) + cc="${CC:-cc}" + ccpost="-lpthread" + ;; + esac + TARGET=`$bin -t?` + ;; + *) + echo "Unknown target '$TARGET'." + exit 77 + ;; + esac +} + +cleanup() { + rm -f $drv $asm $exe $out +} + +extract() { + WHAT="$1" + FILE="$2" + + awk " + /^# >>> $WHAT/ { + p = 1 + next + } + /^# <<&2 + exit 1 + fi + + if + sed -e 1q $t | + grep "skip.* $TARGET\( .*\)\?$" \ + >/dev/null + then + return 0 + fi + + printf "%-45s" "$(basename $t)..." + + if ! $bin -o $asm $t + then + echo "[qbe fail]" + return 1 + fi + + if test -x $binref + then + $binref -o $asmref $t 2>/dev/null + fi + + extract driver $t > $drv + extract output $t > $out + + if test -s $drv + then + src="$drv $asm" + else + src="$asm" + fi + + if ! $cc -g -o $exe $src $ccpost + then + echo "[cc fail]" + return 1 + fi + + if test -s $out + then + $qemu $exe a b c | tr -d '\r' | diff -u - $out + ret=$? + reason="output" + else + $qemu $exe a b c + ret=$? + reason="returned $ret" + fi + + if test $ret -ne 0 + then + echo "[$reason fail]" + return 1 + fi + + echo "[ok]" + + if test -f $asmref && ! 
cmp -s $asm $asmref + then + loc0=`wc -l $asm | cut -d' ' -f1` + loc1=`wc -l $asmref | cut -d' ' -f1` + printf " asm diff: %+d\n" $(($loc0 - $loc1)) + return 0 + fi +} + +#trap cleanup TERM QUIT + +init + +if test -z "$1" +then + echo "usage: tools/test.sh {all, SSAFILE}" 2>&1 + exit 1 +fi + +case "$1" in +"all") + fail=0 + count=0 + for t in $dir/../test/[!_]*.ssa + do + once $t + fail=`expr $fail + $?` + count=`expr $count + 1` + done + if test $fail -ge 1 + then + echo + echo "$fail of $count tests failed!" + else + echo + echo "All is fine!" + fi + exit $fail + ;; +*) + once $1 + exit $? + ;; +esac diff --git a/src/qbe/tools/vatest.py b/src/qbe/tools/vatest.py new file mode 100644 index 00000000..1a1f1991 --- /dev/null +++ b/src/qbe/tools/vatest.py @@ -0,0 +1,161 @@ +# generate variadic calls to test the +# abi implementation + +from random import seed, randint, uniform +from struct import unpack + +I, D = 'd', 'g' + +formats = [ + # list of formats to test + [I], + [D], + [I,D], + [D,D], + [I,I,I,I], + [D,D,D,D], + [I,D,I,D], + [D,D,I,I], + [I,I,D,D], + [], +] + +generate = [ + # numbers of fixed integer and + # floating point arguments to + # test + (0, 0), (1, 0), (0, 1), (4, 0), + (0, 6), (5, 7), (10, 10), (9, 0), +] + +def mkargs(nargs, type, name): + args = map( + lambda n: ''.join([type, name, str(n), ', ']), + range(nargs) + ) + return ''.join(args) + +def mkfstr(fmt): + fstr = map( + lambda x: {I: '%d ', D: '%g '}[x], + fmt + ) + return '"' + ''.join(fstr) + '\\n"' + +def randargs(fmt): + ra = { + I: lambda: '{}'.format(randint(-10, 10)), + D: lambda: '{0:.4g}'.format(uniform(-10, 10)) + } + return list(map(lambda x: ra[x](), fmt)) + +def genssa(qbeprint, qbecall): + funcs = [('qbeprint', qbeprint), ('qbecall', qbecall)] + for fnum, (nia, nfa) in enumerate(generate): + params = "{}{}l %fmt, ...".format( + mkargs(nia, 'w ', '%argw'), + mkargs(nfa, 'd ', '%argd') + ) + for name, code in funcs: + print('export function ${}{}({}) {}' + .format(name, fnum, 
params, code) + ) + +def gendriver(): + print('# >>> driver') + print('# #include ') + print('# #include ') + + for fnum, (nia, nfa) in enumerate(generate): + params = "{}{}char *, ...".format( + mkargs(nia, 'int ', 'argw'), + mkargs(nfa, 'double ', 'argd') + ) + for name in ['qbeprint', 'qbecall']: + print('# extern void {}{}({});' + .format(name, fnum, params) + ) + + output = '' + print('# int print(char *fmt, va_list *ap) {') + print('# return vprintf(fmt, *ap);'); + print('# }') + print('# int main() {') + + for fnum, (nia, nfa) in enumerate(generate): + info = '# ({} int, {} double)'.format(nia, nfa) + print('# puts("{}");'.format(info)) + output += '# {}\n'.format(info) + for fmt in formats: + ra = randargs(fmt) + vaargs = ', '.join(ra) + expect = ' '.join(ra) + if fmt: + vaargs = ', ' + vaargs + expect = expect + ' ' + args = ''.join( + ['0, '] * (nia+nfa) + + [mkfstr(fmt), vaargs] + ) + for name in ['qbeprint', 'qbecall']: + print('# {}{}({});' + .format(name, fnum, args) + ) + output += '# {}\n'.format(expect) + + print('# }') + print('# <<<') + + print('\n# >>> output\n' + output + '# <<<') + + +qbeprint="""{{ +@start + %fmtdbl =l alloc4 4 + %fmtint =l alloc4 4 + %emptys =l alloc4 4 + storew {}, %fmtint + storew {}, %fmtdbl + storew 0, %emptys + %vp =l alloc8 32 + %fmt1 =l add 1, %fmt + vastart %vp +@loop + %p =l phi @start %fmt1, @casef %p1, @cased %p1 + %c =w loadsb %p + %p1 =l add 3, %p + jnz %c, @loop1, @end +@loop1 + %isg =w ceqw %c, 103 + jnz %isg, @casef, @cased +@casef + %dbl =d vaarg %vp + %r =w call $printf(l %fmtdbl, ..., d %dbl) + jmp @loop +@cased + %int =w vaarg %vp + %r =w call $printf(l %fmtint, ..., w %int) + jmp @loop +@end + %r =w call $puts(l %emptys) + ret +}} +""".format( + unpack("i", b'%d \x00')[0], + unpack("i", b'%g \x00')[0] +) + +qbecall="""{ +@start + %vp =l alloc8 32 + vastart %vp + %r =w call $print(l %fmt, l %vp) + ret +} +""" + + +if __name__ == "__main__": + seed(42) + genssa(qbeprint, qbecall) + gendriver() diff --git 
a/src/qbe/util.c b/src/qbe/util.c new file mode 100644 index 00000000..c891c4ec --- /dev/null +++ b/src/qbe/util.c @@ -0,0 +1,774 @@ +#include "all.h" +#include + +typedef struct Bitset Bitset; +typedef struct Vec Vec; +typedef struct Bucket Bucket; + +struct Vec { + ulong mag; + Pool pool; + size_t esz; + ulong cap; + union { + long long ll; + long double ld; + void *ptr; + } align[]; +}; + +struct Bucket { + uint nstr; + char **str; +}; + +enum { + VMin = 2, + VMag = 0xcabba9e, + NPtr = 256, + IBits = 12, + IMask = (1<= NPtr) { + pp = emalloc(NPtr * sizeof(void *)); + pp[0] = pool; + pool = pp; + nptr = 1; + } + return pool[nptr++] = emalloc(n); +} + +void +freeall() +{ + void **pp; + + for (;;) { + for (pp = &pool[1]; pp < &pool[nptr]; pp++) + free(*pp); + pp = pool[0]; + if (!pp) + break; + free(pool); + pool = pp; + nptr = NPtr; + } + nptr = 1; +} + +void * +vnew(ulong len, size_t esz, Pool pool) +{ + void *(*f)(size_t); + ulong cap; + Vec *v; + + for (cap=VMin; capmag = VMag; + v->cap = cap; + v->esz = esz; + v->pool = pool; + return v + 1; +} + +void +vfree(void *p) +{ + Vec *v; + + v = (Vec *)p - 1; + assert(v->mag == VMag); + if (v->pool == PHeap) { + v->mag = 0; + free(v); + } +} + +void +vgrow(void *vp, ulong len) +{ + Vec *v; + void *v1; + + v = *(Vec **)vp - 1; + assert(v+1 && v->mag == VMag); + if (v->cap >= len) + return; + v1 = vnew(len, v->esz, v->pool); + memcpy(v1, v+1, v->cap * v->esz); + vfree(v+1); + *(Vec **)vp = v1; +} + +void +addins(Ins **pvins, uint *pnins, Ins *i) +{ + if (i->op == Onop) + return; + vgrow(pvins, ++(*pnins)); + (*pvins)[(*pnins)-1] = *i; +} + +void +addbins(Ins **pvins, uint *pnins, Blk *b) +{ + Ins *i; + + for (i=b->ins; i<&b->ins[b->nins]; i++) + addins(pvins, pnins, i); +} + +void +strf(char str[NString], char *s, ...) 
+{ + va_list ap; + + va_start(ap, s); + vsnprintf(str, NString, s, ap); + va_end(ap); +} + +uint32_t +intern(char *s) +{ + Bucket *b; + uint32_t h; + uint i, n; + + h = hash(s) & IMask; + b = &itbl[h]; + n = b->nstr; + + for (i=0; istr[i]) == 0) + return h + (i<str = vnew(1, sizeof b->str[0], PHeap); + else if ((n & (n-1)) == 0) + vgrow(&b->str, n+n); + + b->str[n] = emalloc(strlen(s)+1); + b->nstr = n + 1; + strcpy(b->str[n], s); + return h + (n<>IBits < itbl[id&IMask].nstr); + return itbl[id&IMask].str[id>>IBits]; +} + +int +isreg(Ref r) +{ + return rtype(r) == RTmp && r.val < Tmp0; +} + +int +iscmp(int op, int *pk, int *pc) +{ + if (Ocmpw <= op && op <= Ocmpw1) { + *pc = op - Ocmpw; + *pk = Kw; + } + else if (Ocmpl <= op && op <= Ocmpl1) { + *pc = op - Ocmpl; + *pk = Kl; + } + else if (Ocmps <= op && op <= Ocmps1) { + *pc = NCmpI + op - Ocmps; + *pk = Ks; + } + else if (Ocmpd <= op && op <= Ocmpd1) { + *pc = NCmpI + op - Ocmpd; + *pk = Kd; + } + else + return 0; + return 1; +} + +void +igroup(Blk *b, Ins *i, Ins **i0, Ins **i1) +{ + Ins *ib, *ie; + + ib = b->ins; + ie = ib + b->nins; + switch (i->op) { + case Oblit0: + *i0 = i; + *i1 = i + 2; + return; + case Oblit1: + *i0 = i - 1; + *i1 = i + 1; + return; + case_Opar: + for (; i>ib && ispar((i-1)->op); i--) + ; + *i0 = i; + for (; iop); i++) + ; + *i1 = i; + return; + case Ocall: + case_Oarg: + for (; i>ib && isarg((i-1)->op); i--) + ; + *i0 = i; + for (; iop != Ocall; i++) + ; + assert(i < ie); + *i1 = i + 1; + return; + case Osel1: + for (; i>ib && (i-1)->op == Osel1; i--) + ; + assert(i->op == Osel0); + /* fall through */ + case Osel0: + *i0 = i++; + for (; iop == Osel1; i++) + ; + *i1 = i; + return; + default: + if (ispar(i->op)) + goto case_Opar; + if (isarg(i->op)) + goto case_Oarg; + *i0 = i; + *i1 = i + 1; + return; + } +} + +int +argcls(Ins *i, int n) +{ + return optab[i->op].argcls[n][i->cls]; +} + +void +emit(int op, int k, Ref to, Ref arg0, Ref arg1) +{ + if (curi == insb) + die("emit, too many 
instructions"); + *--curi = (Ins){ + .op = op, .cls = k, + .to = to, .arg = {arg0, arg1} + }; +} + +void +emiti(Ins i) +{ + emit(i.op, i.cls, i.to, i.arg[0], i.arg[1]); +} + +void +idup(Blk *b, Ins *s, ulong n) +{ + vgrow(&b->ins, n); + icpy(b->ins, s, n); + b->nins = n; +} + +Ins * +icpy(Ins *d, Ins *s, ulong n) +{ + if (n) + memmove(d, s, n * sizeof(Ins)); + return d + n; +} + +static int cmptab[][2] ={ + /* negation swap */ + [Ciule] = {Ciugt, Ciuge}, + [Ciult] = {Ciuge, Ciugt}, + [Ciugt] = {Ciule, Ciult}, + [Ciuge] = {Ciult, Ciule}, + [Cisle] = {Cisgt, Cisge}, + [Cislt] = {Cisge, Cisgt}, + [Cisgt] = {Cisle, Cislt}, + [Cisge] = {Cislt, Cisle}, + [Cieq] = {Cine, Cieq}, + [Cine] = {Cieq, Cine}, + [NCmpI+Cfle] = {NCmpI+Cfgt, NCmpI+Cfge}, + [NCmpI+Cflt] = {NCmpI+Cfge, NCmpI+Cfgt}, + [NCmpI+Cfgt] = {NCmpI+Cfle, NCmpI+Cflt}, + [NCmpI+Cfge] = {NCmpI+Cflt, NCmpI+Cfle}, + [NCmpI+Cfeq] = {NCmpI+Cfne, NCmpI+Cfeq}, + [NCmpI+Cfne] = {NCmpI+Cfeq, NCmpI+Cfne}, + [NCmpI+Cfo] = {NCmpI+Cfuo, NCmpI+Cfo}, + [NCmpI+Cfuo] = {NCmpI+Cfo, NCmpI+Cfuo}, +}; + +int +cmpneg(int c) +{ + assert(0 <= c && c < NCmp); + return cmptab[c][0]; +} + +int +cmpop(int c) +{ + assert(0 <= c && c < NCmp); + return cmptab[c][1]; +} + +int +cmpwlneg(int op) +{ + if (INRANGE(op, Ocmpw, Ocmpw1)) + return cmpneg(op - Ocmpw) + Ocmpw; + if (INRANGE(op, Ocmpl, Ocmpl1)) + return cmpneg(op - Ocmpl) + Ocmpl; + die("not a wl comparison"); +} + +int +clsmerge(short *pk, short k) +{ + short k1; + + k1 = *pk; + if (k1 == Kx) { + *pk = k; + return 0; + } + if ((k1 == Kw && k == Kl) || (k1 == Kl && k == Kw)) { + *pk = Kw; + return 0; + } + return k1 != k; +} + +int +phicls(int t, Tmp *tmp) +{ + int t1; + + t1 = tmp[t].phi; + if (!t1) + return t; + t1 = phicls(t1, tmp); + tmp[t].phi = t1; + return t1; +} + +uint +phiargn(Phi *p, Blk *b) +{ + uint n; + + if (p) + for (n=0; nnarg; n++) + if (p->blk[n] == b) + return n; + return -1; +} + +Ref +phiarg(Phi *p, Blk *b) +{ + uint n; + + n = phiargn(p, b); + assert(n != -1u && 
"block not found"); + return p->arg[n]; +} + +Ref +newtmp(char *prfx, int k, Fn *fn) +{ + static int n; + int t; + + t = fn->ntmp++; + vgrow(&fn->tmp, fn->ntmp); + memset(&fn->tmp[t], 0, sizeof(Tmp)); + if (prfx) + strf(fn->tmp[t].name, "%s.%d", prfx, ++n); + fn->tmp[t].cls = k; + fn->tmp[t].slot = -1; + fn->tmp[t].nuse = +1; + fn->tmp[t].ndef = +1; + return TMP(t); +} + +void +chuse(Ref r, int du, Fn *fn) +{ + if (rtype(r) == RTmp) + fn->tmp[r.val].nuse += du; +} + +int +symeq(Sym s0, Sym s1) +{ + return s0.type == s1.type && s0.id == s1.id; +} + +Ref +newcon(Con *c0, Fn *fn) +{ + Con *c1; + int i; + + for (i=1; incon; i++) { + c1 = &fn->con[i]; + if (c0->type == c1->type + && symeq(c0->sym, c1->sym) + && c0->bits.i == c1->bits.i) + return CON(i); + } + vgrow(&fn->con, ++fn->ncon); + fn->con[i] = *c0; + return CON(i); +} + +Ref +getcon(int64_t val, Fn *fn) +{ + int c; + + for (c=1; cncon; c++) + if (fn->con[c].type == CBits + && fn->con[c].bits.i == val) + return CON(c); + vgrow(&fn->con, ++fn->ncon); + fn->con[c] = (Con){.type = CBits, .bits.i = val}; + return CON(c); +} + +int +addcon(Con *c0, Con *c1, int m) +{ + if (m != 1 && c1->type == CAddr) + return 0; + if (c0->type == CUndef) { + *c0 = *c1; + c0->bits.i *= m; + } else { + if (c1->type == CAddr) { + if (c0->type == CAddr) + return 0; + c0->type = CAddr; + c0->sym = c1->sym; + } + c0->bits.i += c1->bits.i * m; + } + return 1; +} + +int +isconbits(Fn *fn, Ref r, int64_t *v) +{ + Con *c; + + if (rtype(r) == RCon) { + c = &fn->con[r.val]; + if (c->type == CBits) { + *v = c->bits.i; + return 1; + } + } + return 0; +} + +void +salloc(Ref rt, Ref rs, Fn *fn) +{ + Ref r0, r1; + int64_t sz; + + /* we need to make sure + * the stack remains aligned + * (rsp = 0) mod 16 + */ + fn->dynalloc = 1; + if (rtype(rs) == RCon) { + sz = fn->con[rs.val].bits.i; + if (sz < 0 || sz >= INT_MAX-15) + err("invalid alloc size %"PRId64, sz); + sz = (sz + 15) & -16; + emit(Osalloc, Kl, rt, getcon(sz, fn), R); + } else { + /* r0 = (r 
+ 15) & -16 */ + r0 = newtmp("isel", Kl, fn); + r1 = newtmp("isel", Kl, fn); + emit(Osalloc, Kl, rt, r0, R); + emit(Oand, Kl, r0, r1, getcon(-16, fn)); + emit(Oadd, Kl, r1, rs, getcon(15, fn)); + if (fn->tmp[rs.val].slot != -1) + err("unlikely alloc argument %%%s for %%%s", + fn->tmp[rs.val].name, fn->tmp[rt.val].name); + } +} + +void +bsinit(BSet *bs, uint n) +{ + n = (n + NBit-1) / NBit; + bs->nt = n; + bs->t = alloc(n * sizeof bs->t[0]); +} + +MAKESURE(NBit_is_64, NBit == 64); +inline static uint +popcnt(bits b) +{ + b = (b & 0x5555555555555555) + ((b>>1) & 0x5555555555555555); + b = (b & 0x3333333333333333) + ((b>>2) & 0x3333333333333333); + b = (b & 0x0f0f0f0f0f0f0f0f) + ((b>>4) & 0x0f0f0f0f0f0f0f0f); + b += (b>>8); + b += (b>>16); + b += (b>>32); + return b & 0xff; +} + +inline static int +firstbit(bits b) +{ + int n; + + n = 0; + if (!(b & 0xffffffff)) { + n += 32; + b >>= 32; + } + if (!(b & 0xffff)) { + n += 16; + b >>= 16; + } + if (!(b & 0xff)) { + n += 8; + b >>= 8; + } + if (!(b & 0xf)) { + n += 4; + b >>= 4; + } + n += (char[16]){4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0}[b & 0xf]; + return n; +} + +uint +bscount(BSet *bs) +{ + uint i, n; + + n = 0; + for (i=0; int; i++) + n += popcnt(bs->t[i]); + return n; +} + +static inline uint +bsmax(BSet *bs) +{ + return bs->nt * NBit; +} + +void +bsset(BSet *bs, uint elt) +{ + assert(elt < bsmax(bs)); + bs->t[elt/NBit] |= BIT(elt%NBit); +} + +void +bsclr(BSet *bs, uint elt) +{ + assert(elt < bsmax(bs)); + bs->t[elt/NBit] &= ~BIT(elt%NBit); +} + +#define BSOP(f, op) \ + void \ + f(BSet *a, BSet *b) \ + { \ + uint i; \ + \ + assert(a->nt == b->nt); \ + for (i=0; int; i++) \ + a->t[i] op b->t[i]; \ + } + +BSOP(bscopy, =) +BSOP(bsunion, |=) +BSOP(bsinter, &=) +BSOP(bsdiff, &= ~) + +int +bsequal(BSet *a, BSet *b) +{ + uint i; + + assert(a->nt == b->nt); + for (i=0; int; i++) + if (a->t[i] != b->t[i]) + return 0; + return 1; +} + +void +bszero(BSet *bs) +{ + memset(bs->t, 0, bs->nt * sizeof bs->t[0]); +} + +/* iterates on a 
bitset, use as follows + * + * for (i=0; bsiter(set, &i); i++) + * use(i); + * + */ +int +bsiter(BSet *bs, int *elt) +{ + bits b; + uint t, i; + + i = *elt; + t = i/NBit; + if (t >= bs->nt) + return 0; + b = bs->t[t]; + b &= ~(BIT(i%NBit) - 1); + while (!b) { + ++t; + if (t >= bs->nt) + return 0; + b = bs->t[t]; + } + *elt = NBit*t + firstbit(b); + return 1; +} + +void +dumpts(BSet *bs, Tmp *tmp, FILE *f) +{ + int t; + + fprintf(f, "["); + for (t=Tmp0; bsiter(bs, &t); t++) + fprintf(f, " %s", tmp[t].name); + fprintf(f, " ]\n"); +} + +void +runmatch(uchar *code, Num *tn, Ref ref, Ref *var) +{ + Ref stkbuf[20], *stk; + uchar *s, *pc; + int bc, i; + int n, nl, nr; + + assert(rtype(ref) == RTmp); + stk = stkbuf; + pc = code; + while ((bc = *pc)) + switch (bc) { + case 1: /* pushsym */ + case 2: /* push */ + assert(stk < &stkbuf[20]); + assert(rtype(ref) == RTmp); + nl = tn[ref.val].nl; + nr = tn[ref.val].nr; + if (bc == 1 && nl > nr) { + *stk++ = tn[ref.val].l; + ref = tn[ref.val].r; + } else { + *stk++ = tn[ref.val].r; + ref = tn[ref.val].l; + } + pc++; + break; + case 3: /* set */ + var[*++pc] = ref; + if (*(pc + 1) == 0) + return; + /* fall through */ + case 4: /* pop */ + assert(stk > &stkbuf[0]); + ref = *--stk; + pc++; + break; + case 5: /* switch */ + assert(rtype(ref) == RTmp); + n = tn[ref.val].n; + s = pc + 1; + for (i=*s++; i>0; i--, s++) + if (n == *s++) + break; + pc += *s; + break; + default: /* jump */ + assert(bc >= 10); + pc = code + (bc - 10); + break; + } +} From 5fcf765c8d22558d5999214298107194a8138815 Mon Sep 17 00:00:00 2001 From: John Alanbrook Date: Tue, 17 Feb 2026 10:57:50 -0600 Subject: [PATCH 2/5] parallel assembly --- build.cm | 113 ++++++++++++++++++++++++++++++++++++------------- compare_aot.ce | 17 ++++++++ qbe_emit.cm | 55 ++++++++++++------------ 3 files changed, 126 insertions(+), 59 deletions(-) diff --git a/build.cm b/build.cm index 69d775b0..8ed32357 100644 --- a/build.cm +++ b/build.cm @@ -467,6 +467,64 @@ Build.build_static = 
function(packages, target, output, buildtype) { // Native .cm compilation (source → mcode → QBE IL → .o → .dylib) // ============================================================================ +// Batched native compilation: split functions into batches, run QBE on each, +// assemble in parallel, return array of .o paths. +// il_parts: {data: text, functions: [text, ...]} +// cc: C compiler path +// tmp_prefix: prefix for temp files (e.g. /tmp/cell_native_) +function compile_native_batched(il_parts, cc, tmp_prefix) { + var nfuncs = length(il_parts.functions) + var nbatch = 8 + var o_paths = [] + var s_paths = [] + var asm_cmds = [] + var batch_fns = null + var batch_il = null + var asm_text = null + var s_path = null + var o_path = null + var end = 0 + var bi = 0 + var fi = 0 + var ai = 0 + var rc = null + var parallel_cmd = null + + if (nfuncs < nbatch) nbatch = nfuncs + if (nbatch < 1) nbatch = 1 + + // Generate .s files: run QBE on each batch + while (bi < nbatch) { + batch_fns = [] + end = nfuncs * (bi + 1) / nbatch + while (fi < end) { + batch_fns[] = il_parts.functions[fi] + fi = fi + 1 + } + batch_il = il_parts.data + "\n\n" + text(batch_fns, "\n") + asm_text = os.qbe(batch_il) + s_path = tmp_prefix + '_b' + text(bi) + '.s' + o_path = tmp_prefix + '_b' + text(bi) + '.o' + fd.slurpwrite(s_path, stone(blob(asm_text))) + s_paths[] = s_path + o_paths[] = o_path + bi = bi + 1 + } + + // Assemble all batches in parallel + while (ai < length(s_paths)) { + asm_cmds[] = cc + ' -c ' + s_paths[ai] + ' -o ' + o_paths[ai] + ai = ai + 1 + } + parallel_cmd = text(asm_cmds, ' & ') + ' & wait' + rc = os.system(parallel_cmd) + if (rc != 0) { + print('Parallel assembly failed'); disrupt + } + + return o_paths +} + // Post-process QBE IL: insert dead labels after ret/jmp (QBE requirement) function qbe_insert_dead_labels(il_text) { var lines = array(il_text, "\n") @@ -536,10 +594,8 @@ Build.compile_native = function(src_path, target, buildtype, pkg) { if (pkg) { sym_name = 
shop.c_symbol_for_file(pkg, fd.basename(src_path)) } - var il = qbe_emit(optimized, qbe_macros, sym_name) - - // Step 3: Post-process (insert dead labels) - il = qbe_insert_dead_labels(il) + var il_parts = qbe_emit(optimized, qbe_macros, sym_name) + var il = il_parts.data + "\n\n" + text(il_parts.functions, "\n") // Content hash for cache key var hash = content_hash(src + '\n' + _target + '\nnative') @@ -550,22 +606,14 @@ Build.compile_native = function(src_path, target, buildtype, pkg) { if (fd.is_file(dylib_path)) return dylib_path - // Step 4: QBE compile IR to assembly (in-process) + // Compile and assemble via batched parallel pipeline var tmp = '/tmp/cell_native_' + hash - var s_path = tmp + '.s' - var o_path = tmp + '.o' var rt_o_path = '/tmp/cell_qbe_rt.o' - var asm_text = os.qbe(il) - fd.slurpwrite(s_path, stone(blob(asm_text))) + var o_paths = compile_native_batched(il_parts, cc, tmp) - // Step 5: Assemble - var rc = os.system(cc + ' -c ' + s_path + ' -o ' + o_path) - if (rc != 0) { - print('Assembly failed for: ' + src_path); disrupt - } - - // Step 7: Compile QBE runtime stubs if needed + // Compile QBE runtime stubs if needed + var rc = null if (!fd.is_file(rt_o_path)) { qbe_rt_path = shop.get_package_dir('core') + '/qbe_rt.c' rc = os.system(cc + ' -c ' + qbe_rt_path + ' -o ' + rt_o_path + ' -fPIC') @@ -574,14 +622,19 @@ Build.compile_native = function(src_path, target, buildtype, pkg) { } } - // Step 8: Link dylib + // Link dylib var link_cmd = cc + ' -shared -fPIC' if (tc.system == 'darwin') { link_cmd = link_cmd + ' -undefined dynamic_lookup' } else if (tc.system == 'linux') { link_cmd = link_cmd + ' -Wl,--allow-shlib-undefined' } - link_cmd = link_cmd + ' ' + o_path + ' ' + rt_o_path + ' -o ' + dylib_path + var oi = 0 + while (oi < length(o_paths)) { + link_cmd = link_cmd + ' ' + o_paths[oi] + oi = oi + 1 + } + link_cmd = link_cmd + ' ' + rt_o_path + ' -o ' + dylib_path rc = os.system(link_cmd) if (rc != 0) { @@ -625,8 +678,7 @@ 
Build.compile_native_ir = function(optimized, src_path, opts) { if (pkg) { sym_name = shop.c_symbol_for_file(pkg, fd.basename(src_path)) } - var il = qbe_emit(optimized, qbe_macros, sym_name) - il = qbe_insert_dead_labels(il) + var il_parts = qbe_emit(optimized, qbe_macros, sym_name) var src = text(fd.slurp(src_path)) var hash = content_hash(src + '\n' + _target + '\nnative') @@ -637,19 +689,14 @@ Build.compile_native_ir = function(optimized, src_path, opts) { if (fd.is_file(dylib_path)) return dylib_path + // Compile and assemble via batched parallel pipeline var tmp = '/tmp/cell_native_' + hash - var s_path = tmp + '.s' - var o_path = tmp + '.o' var rt_o_path = '/tmp/cell_qbe_rt.o' - var asm_text = os.qbe(il) - fd.slurpwrite(s_path, stone(blob(asm_text))) - - var rc = os.system(cc + ' -c ' + s_path + ' -o ' + o_path) - if (rc != 0) { - print('Assembly failed for: ' + src_path); disrupt - } + var o_paths = compile_native_batched(il_parts, cc, tmp) + // Compile QBE runtime stubs if needed + var rc = null if (!fd.is_file(rt_o_path)) { qbe_rt_path = shop.get_package_dir('core') + '/qbe_rt.c' rc = os.system(cc + ' -c ' + qbe_rt_path + ' -o ' + rt_o_path + ' -fPIC') @@ -658,13 +705,19 @@ Build.compile_native_ir = function(optimized, src_path, opts) { } } + // Link dylib var link_cmd = cc + ' -shared -fPIC' if (tc.system == 'darwin') { link_cmd = link_cmd + ' -undefined dynamic_lookup' } else if (tc.system == 'linux') { link_cmd = link_cmd + ' -Wl,--allow-shlib-undefined' } - link_cmd = link_cmd + ' ' + o_path + ' ' + rt_o_path + ' -o ' + dylib_path + var oi = 0 + while (oi < length(o_paths)) { + link_cmd = link_cmd + ' ' + o_paths[oi] + oi = oi + 1 + } + link_cmd = link_cmd + ' ' + rt_o_path + ' -o ' + dylib_path rc = os.system(link_cmd) if (rc != 0) { diff --git a/compare_aot.ce b/compare_aot.ce index 35b2bcbc..e0539e73 100644 --- a/compare_aot.ce +++ b/compare_aot.ce @@ -7,6 +7,7 @@ var build = use('build') var fd_mod = use('fd') var os = use('os') var json = 
use('json') +var time = use('time') var show = function(v) { if (v == null) return "null" @@ -39,12 +40,28 @@ var fold = use('fold') var mcode_mod = use('mcode') var streamline_mod = use('streamline') +var t0 = time.number() var src = text(fd_mod.slurp(abs)) +var t1 = time.number() var tok = tokenize(src, abs) +var t2 = time.number() var ast = parse_mod(tok.tokens, src, abs, tokenize) +var t3 = time.number() var folded = fold(ast) +var t4 = time.number() var compiled = mcode_mod(folded) +var t5 = time.number() var optimized = streamline_mod(compiled) +var t6 = time.number() + +print('--- front-end timing ---') +print(' read: ' + text(t1 - t0) + 's') +print(' tokenize: ' + text(t2 - t1) + 's') +print(' parse: ' + text(t3 - t2) + 's') +print(' fold: ' + text(t4 - t3) + 's') +print(' mcode: ' + text(t5 - t4) + 's') +print(' streamline: ' + text(t6 - t5) + 's') +print(' total: ' + text(t6 - t0) + 's') // Shared env for both paths — only non-intrinsic runtime functions. // Intrinsics (starts_with, ends_with, logical, some, every, etc.) 
live on diff --git a/qbe_emit.cm b/qbe_emit.cm index faa301c9..c0f7f130 100644 --- a/qbe_emit.cm +++ b/qbe_emit.cm @@ -127,6 +127,8 @@ var qbe_emit = function(ir, qbe, export_name) { emit(` storel ${sv}, %${t}`) } + var needs_exc_ret = false + var refresh_fp = function() { emit(` %fp =l call $cell_rt_refresh_fp_checked(l %ctx)`) var exc = fresh() @@ -134,9 +136,8 @@ var qbe_emit = function(ir, qbe, export_name) { if (has_handler && !in_handler) { emit(` jnz %${exc}, @disruption_handler, @${exc}_ok`) } else { - emit(` jnz %${exc}, @${exc}_exc, @${exc}_ok`) - emit(`@${exc}_exc`) - emit(` ret 15`) + needs_exc_ret = true + emit(` jnz %${exc}, @_exc_ret, @${exc}_ok`) } emit(`@${exc}_ok`) } @@ -161,9 +162,9 @@ var qbe_emit = function(ir, qbe, export_name) { } i = i + 1 - // Labels are plain strings; skip _nop_ur_ pseudo-labels from streamline + // Labels are plain strings; skip nop pseudo-labels from streamline if (is_text(instr)) { - if (starts_with(instr, "_nop_ur_")) continue + if (starts_with(instr, "_nop_ur_") || starts_with(instr, "_nop_tc_")) continue lbl = sanitize(instr) if (!last_was_term) { emit(` jmp @${lbl}`) @@ -839,9 +840,8 @@ var qbe_emit = function(ir, qbe, export_name) { if (has_handler) { emit(` jnz %${chk}, @disruption_handler, @${chk}_ok`) } else { - emit(` jnz %${chk}, @${chk}_exc, @${chk}_ok`) - emit(`@${chk}_exc`) - emit(` ret 15`) + needs_exc_ret = true + emit(` jnz %${chk}, @_exc_ret, @${chk}_ok`) } emit(`@${chk}_ok`) refresh_fp() @@ -857,9 +857,8 @@ var qbe_emit = function(ir, qbe, export_name) { if (has_handler) { emit(` jnz %${chk}, @disruption_handler, @${chk}_ok`) } else { - emit(` jnz %${chk}, @${chk}_exc, @${chk}_ok`) - emit(`@${chk}_exc`) - emit(` ret 15`) + needs_exc_ret = true + emit(` jnz %${chk}, @_exc_ret, @${chk}_ok`) } emit(`@${chk}_ok`) refresh_fp() @@ -886,9 +885,8 @@ var qbe_emit = function(ir, qbe, export_name) { refresh_fp() emit(` ret %${p}`) } else { - emit(` jnz %${chk}, @${chk}_exc, @${chk}_ok`) - emit(`@${chk}_exc`) - 
emit(` ret 15`) + needs_exc_ret = true + emit(` jnz %${chk}, @_exc_ret, @${chk}_ok`) emit(`@${chk}_ok`) emit(` ret %${p}`) } @@ -1028,6 +1026,12 @@ var qbe_emit = function(ir, qbe, export_name) { emit(` call $cell_rt_disrupt(l %ctx)`) emit(` ret 15`) + // Shared exception return (for functions without disruption handler) + if (needs_exc_ret) { + emit("@_exc_ret") + emit(" ret 15") + } + emit("}") emit("") } @@ -1036,30 +1040,23 @@ var qbe_emit = function(ir, qbe, export_name) { // Main: compile all functions then main // ============================================================ + var fn_bodies = [] var fi = 0 while (fi < length(ir.functions)) { + out = [] compile_fn(ir.functions[fi], fi, false) + fn_bodies[] = text(out, "\n") fi = fi + 1 } + out = [] compile_fn(ir.main, -1, true) + fn_bodies[] = text(out, "\n") - // Assemble: data section first, then function bodies - var result = [] - var di = 0 - while (di < length(data_out)) { - push(result, data_out[di]) - di = di + 1 + return { + data: text(data_out, "\n"), + functions: fn_bodies } - if (length(data_out) > 0) push(result, "") - - di = 0 - while (di < length(out)) { - push(result, out[di]) - di = di + 1 - } - - return text(result, "\n") } return qbe_emit From 5ef3381fffd596810b527ca37563f00f3b5583af Mon Sep 17 00:00:00 2001 From: John Alanbrook Date: Tue, 17 Feb 2026 11:12:51 -0600 Subject: [PATCH 3/5] native aot suite passes --- qbe.cm | 7 +-- qbe_emit.cm | 19 ++++++-- source/qbe_helpers.c | 105 +++++++++++++++++++++++++++++++++++++------ 3 files changed, 109 insertions(+), 22 deletions(-) diff --git a/qbe.cm b/qbe.cm index 516d1b8f..2424b566 100644 --- a/qbe.cm +++ b/qbe.cm @@ -519,12 +519,9 @@ var ne_bool = function(p, a, b) { ` } -// --- Type guard: is_identical --- +// --- Type guard: is_identical (chases forwarding pointers via C helper) --- var is_identical = function(p, a, b) { - return ` %${p}.cr =w ceql ${a}, ${b} - %${p}.crext =l extuw %${p}.cr - %${p}.sh =l shl %${p}.crext, 5 - %${p} =l or 
%${p}.sh, 3 + return ` %${p} =l call $cell_rt_is_identical(l %ctx, l ${a}, l ${b}) ` } diff --git a/qbe_emit.cm b/qbe_emit.cm index faa301c9..dfbdb9a7 100644 --- a/qbe_emit.cm +++ b/qbe_emit.cm @@ -102,6 +102,9 @@ var qbe_emit = function(ir, qbe, export_name) { var pat_label = null var flg_label = null var in_handler = false + var tol = null + var fn_arity = 0 + var arity_tmp = null // Function signature: (ctx, frame_ptr) → JSValue emit(`export function l $${name}(l %ctx, l %fp) {`) @@ -572,8 +575,10 @@ var qbe_emit = function(ir, qbe, export_name) { if (op == "eq_tol" || op == "ne_tol") { lhs = s_read(a2) rhs = s_read(a3) + a4 = instr[4] + tol = s_read(a4) p = fresh() - emit(` %${p} =l call $cell_rt_${op}(l %ctx, l ${lhs}, l ${rhs})`) + emit(` %${p} =l call $cell_rt_${op}(l %ctx, l ${lhs}, l ${rhs}, l ${tol})`) s_write(a1, `%${p}`) continue } @@ -700,7 +705,7 @@ var qbe_emit = function(ir, qbe, export_name) { p = fresh() if (pn != null) { sl = intern_str(pn) - emit(` %${p} =l call $cell_rt_load_field(l %ctx, l ${v}, l ${sl})`) + emit(` %${p} =l call $cell_rt_load_prop_str(l %ctx, l ${v}, l ${sl})`) } else { lhs = s_read(a3) emit(` %${p} =l call $cell_rt_load_dynamic(l %ctx, l ${v}, l ${lhs})`) @@ -899,8 +904,14 @@ var qbe_emit = function(ir, qbe, export_name) { // --- Function object creation [G] --- if (op == "function") { + fn_arity = 0 + if (a2 >= 0 && a2 < length(ir.functions)) { + fn_arity = ir.functions[a2].nr_args + } p = fresh() - emit(` %${p} =l call $cell_rt_make_function(l %ctx, l ${text(a2)}, l %fp)`) + arity_tmp = fresh() + emit(` %${arity_tmp} =l copy ${text(fn_arity)}`) + emit(` %${p} =l call $cell_rt_make_function(l %ctx, l ${text(a2)}, l %fp, l %${arity_tmp})`) refresh_fp() s_write(a1, `%${p}`) continue @@ -980,7 +991,7 @@ var qbe_emit = function(ir, qbe, export_name) { p = fresh() if (pn != null) { sl = intern_str(pn) - emit(` %${p} =l call $cell_rt_delete(l %ctx, l ${v}, l ${sl})`) + emit(` %${p} =l call $cell_rt_delete_str(l %ctx, l ${v}, l 
${sl})`) } else { lhs = s_read(a3) emit(` %${p} =l call $cell_rt_delete(l %ctx, l ${v}, l ${lhs})`) diff --git a/source/qbe_helpers.c b/source/qbe_helpers.c index 8a48ce02..08e30425 100644 --- a/source/qbe_helpers.c +++ b/source/qbe_helpers.c @@ -222,6 +222,16 @@ JSValue qbe_shift_shr(JSContext *ctx, JSValue a, JSValue b) { /* --- Property access --- */ JSValue cell_rt_load_field(JSContext *ctx, JSValue obj, const char *name) { + if (JS_IsFunction(obj)) { + JS_ThrowTypeError(ctx, "cannot read property of function"); + return JS_EXCEPTION; + } + return JS_GetPropertyStr(ctx, obj, name); +} + +/* Like cell_rt_load_field but without the function guard. + Used by load_dynamic when the key happens to be a static string. */ +JSValue cell_rt_load_prop_str(JSContext *ctx, JSValue obj, const char *name) { return JS_GetPropertyStr(ctx, obj, name); } @@ -238,10 +248,15 @@ JSValue cell_rt_load_dynamic(JSContext *ctx, JSValue obj, JSValue key) { void cell_rt_store_dynamic(JSContext *ctx, JSValue val, JSValue obj, JSValue key) { - if (JS_IsInt(key)) + if (JS_IsInt(key)) { JS_SetPropertyNumber(ctx, obj, (uint32_t)JS_VALUE_GET_INT(key), val); - else + } else if (JS_IsArray(obj) && !JS_IsInt(key)) { + JS_ThrowTypeError(ctx, "array index must be a number"); + } else if (JS_IsBool(key) || JS_IsNull(key) || JS_IsArray(key) || JS_IsFunction(key)) { + JS_ThrowTypeError(ctx, "object key must be text"); + } else { JS_SetProperty(ctx, obj, key, val); + } } JSValue cell_rt_load_index(JSContext *ctx, JSValue arr, JSValue idx) { @@ -466,7 +481,8 @@ static JSValue cell_fn_trampoline(JSContext *ctx, JSValue this_val, return result; } -JSValue cell_rt_make_function(JSContext *ctx, int64_t fn_idx, void *outer_fp) { +JSValue cell_rt_make_function(JSContext *ctx, int64_t fn_idx, void *outer_fp, + int64_t nr_args) { (void)outer_fp; if (g_native_fn_count >= MAX_NATIVE_FN) return JS_ThrowTypeError(ctx, "too many native functions (max %d)", MAX_NATIVE_FN); @@ -487,7 +503,7 @@ JSValue 
cell_rt_make_function(JSContext *ctx, int64_t fn_idx, void *outer_fp) { } return JS_NewCFunction2(ctx, (JSCFunction *)cell_fn_trampoline, "native_fn", - 255, JS_CFUNC_generic_magic, global_id); + (int)nr_args, JS_CFUNC_generic_magic, global_id); } /* --- Frame-based function calling --- */ @@ -515,15 +531,35 @@ JSValue cell_rt_invoke(JSContext *ctx, JSValue frame_val) { JSFrameRegister *fr = (JSFrameRegister *)JS_VALUE_GET_PTR(frame_val); int nr_slots = (int)objhdr_cap56(fr->header); int c_argc = (nr_slots >= 2) ? nr_slots - 2 : 0; + JSValue fn_val = fr->function; - /* Copy args to C stack */ - JSValue args[c_argc > 0 ? c_argc : 1]; - for (int i = 0; i < c_argc; i++) - args[i] = fr->slots[i + 1]; + if (!JS_IsFunction(fn_val)) { + JS_ThrowTypeError(ctx, "not a function"); + return JS_EXCEPTION; + } + + JSFunction *fn = JS_VALUE_GET_FUNCTION(fn_val); + JSValue result; + + if (fn->kind == JS_FUNC_KIND_C) { + /* Match MACH_INVOKE: C functions go directly to js_call_c_function, + bypassing JS_Call's arity check. Extra args are silently available. */ + result = js_call_c_function(ctx, fn_val, fr->slots[0], c_argc, &fr->slots[1]); + } else { + /* Register/bytecode functions — use JS_CallInternal (no arity gate) */ + JSValue args[c_argc > 0 ? 
c_argc : 1]; + for (int i = 0; i < c_argc; i++) + args[i] = fr->slots[i + 1]; + result = JS_CallInternal(ctx, fn_val, fr->slots[0], c_argc, args, 0); + } - JSValue result = JS_Call(ctx, fr->function, fr->slots[0], c_argc, args); if (JS_IsException(result)) return JS_EXCEPTION; + /* Clear any stale exception left by functions that returned a valid + value despite internal error (e.g., sign("text") returns null + but JS_ToFloat64 leaves an exception flag) */ + if (JS_HasException(ctx)) + JS_GetException(ctx); return result; } @@ -549,6 +585,16 @@ JSValue cell_rt_pop(JSContext *ctx, JSValue arr) { JSValue cell_rt_delete(JSContext *ctx, JSValue obj, JSValue key) { int ret = JS_DeleteProperty(ctx, obj, key); + if (ret < 0) + return JS_EXCEPTION; + return JS_NewBool(ctx, ret >= 0); +} + +JSValue cell_rt_delete_str(JSContext *ctx, JSValue obj, const char *name) { + JSValue key = JS_NewString(ctx, name); + int ret = JS_DeleteProperty(ctx, obj, key); + if (ret < 0) + return JS_EXCEPTION; return JS_NewBool(ctx, ret >= 0); } @@ -595,12 +641,37 @@ JSValue cell_rt_ge_text(JSContext *ctx, JSValue a, JSValue b) { return JS_NewBool(ctx, r); } -JSValue cell_rt_eq_tol(JSContext *ctx, JSValue a, JSValue b) { - return JS_NewBool(ctx, a == b); +static int cell_rt_tol_eq_inner(JSContext *ctx, JSValue a, JSValue b, + JSValue tol) { + if (JS_IsNumber(a) && JS_IsNumber(b) && JS_IsNumber(tol)) { + double da, db, dt; + JS_ToFloat64(ctx, &da, a); + JS_ToFloat64(ctx, &db, b); + JS_ToFloat64(ctx, &dt, tol); + return fabs(da - db) <= dt; + } + if (JS_IsText(a) && JS_IsText(b) && JS_IsBool(tol) && JS_VALUE_GET_BOOL(tol)) { + return js_string_compare_value_nocase(ctx, a, b) == 0; + } + /* Fallback to standard equality */ + if (a == b) return 1; + if (JS_IsText(a) && JS_IsText(b)) + return js_string_compare_value(ctx, a, b, 1) == 0; + if (JS_IsNumber(a) && JS_IsNumber(b)) { + double da, db; + JS_ToFloat64(ctx, &da, a); + JS_ToFloat64(ctx, &db, b); + return da == db; + } + return 0; } -JSValue 
cell_rt_ne_tol(JSContext *ctx, JSValue a, JSValue b) { - return JS_NewBool(ctx, a != b); +JSValue cell_rt_eq_tol(JSContext *ctx, JSValue a, JSValue b, JSValue tol) { + return JS_NewBool(ctx, cell_rt_tol_eq_inner(ctx, a, b, tol)); +} + +JSValue cell_rt_ne_tol(JSContext *ctx, JSValue a, JSValue b, JSValue tol) { + return JS_NewBool(ctx, !cell_rt_tol_eq_inner(ctx, a, b, tol)); } /* --- Type check: is_proxy (function with arity 2) --- */ @@ -612,6 +683,14 @@ int cell_rt_is_proxy(JSContext *ctx, JSValue v) { return fn->length == 2; } +/* --- Identity check (chases forwarding pointers) --- */ + +JSValue cell_rt_is_identical(JSContext *ctx, JSValue a, JSValue b) { + if (JS_IsPtr(a)) a = JS_MKPTR(chase(a)); + if (JS_IsPtr(b)) b = JS_MKPTR(chase(b)); + return JS_NewBool(ctx, a == b); +} + /* --- Short-circuit and/or (non-allocating) --- */ JSValue cell_rt_and(JSContext *ctx, JSValue left, JSValue right) { From 570f0cdc835699e8d8b210abb21d23a377d24620 Mon Sep 17 00:00:00 2001 From: John Alanbrook Date: Tue, 17 Feb 2026 11:17:59 -0600 Subject: [PATCH 4/5] add qbe config to copmile --- src/qbe/config.h | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) create mode 100644 src/qbe/config.h diff --git a/src/qbe/config.h b/src/qbe/config.h new file mode 100644 index 00000000..dfe76322 --- /dev/null +++ b/src/qbe/config.h @@ -0,0 +1,18 @@ +/* Auto-generated default target for QBE. + The qbe_backend.c #ifdef chain handles all common platforms; + this file is only reached by the #else fallback. 
*/ +#if defined(__aarch64__) && defined(__APPLE__) +#define Deftgt T_arm64_apple +#elif defined(__aarch64__) +#define Deftgt T_arm64 +#elif defined(__x86_64__) && defined(__APPLE__) +#define Deftgt T_amd64_apple +#elif defined(__x86_64__) && defined(_WIN32) +#define Deftgt T_amd64_win +#elif defined(__x86_64__) +#define Deftgt T_amd64_sysv +#elif defined(__riscv) && __riscv_xlen == 64 +#define Deftgt T_rv64 +#else +#error "unsupported target for QBE" +#endif From 3f6388ff4e345d1b510aebfa35995bd631ed0663 Mon Sep 17 00:00:00 2001 From: John Alanbrook Date: Tue, 17 Feb 2026 11:53:46 -0600 Subject: [PATCH 5/5] far smaller assmbly --- build.cm | 8 +- qbe_emit.cm | 1109 ++++++++++++++++++++++++++++++++------------------- 2 files changed, 711 insertions(+), 406 deletions(-) diff --git a/build.cm b/build.cm index 8ed32357..4a1b8016 100644 --- a/build.cm +++ b/build.cm @@ -489,6 +489,9 @@ function compile_native_batched(il_parts, cc, tmp_prefix) { var ai = 0 var rc = null var parallel_cmd = null + var helpers_il = (il_parts.helpers && length(il_parts.helpers) > 0) + ? text(il_parts.helpers, "\n") : "" + var prefix = null if (nfuncs < nbatch) nbatch = nfuncs if (nbatch < 1) nbatch = 1 @@ -501,7 +504,9 @@ function compile_native_batched(il_parts, cc, tmp_prefix) { batch_fns[] = il_parts.functions[fi] fi = fi + 1 } - batch_il = il_parts.data + "\n\n" + text(batch_fns, "\n") + // Batch 0 includes helper functions; others reference them as external symbols + prefix = (bi == 0 && helpers_il != "") ? 
helpers_il + "\n\n" : "" + batch_il = il_parts.data + "\n\n" + prefix + text(batch_fns, "\n") asm_text = os.qbe(batch_il) s_path = tmp_prefix + '_b' + text(bi) + '.s' o_path = tmp_prefix + '_b' + text(bi) + '.o' @@ -595,7 +600,6 @@ Build.compile_native = function(src_path, target, buildtype, pkg) { sym_name = shop.c_symbol_for_file(pkg, fd.basename(src_path)) } var il_parts = qbe_emit(optimized, qbe_macros, sym_name) - var il = il_parts.data + "\n\n" + text(il_parts.functions, "\n") // Content hash for cache key var hash = content_hash(src + '\n' + _target + '\nnative') diff --git a/qbe_emit.cm b/qbe_emit.cm index c0f7f130..2b1246c2 100644 --- a/qbe_emit.cm +++ b/qbe_emit.cm @@ -3,6 +3,580 @@ // a complete QBE IL program ready for the qbe compiler. // qbe module is passed via env as 'qbe' +// ============================================================ +// QBE IL helper function generation +// Generates helper functions that are defined once and called +// from each operation site, reducing code duplication. 
+// ============================================================ + +var emit_helpers = function(qbe) { + var h = [] + + // --- Slot access IL fragments --- + var sr = function(name, slot) { + return ` %${name}.o =l shl ${slot}, 3 + %${name}.p =l add %fp, %${name}.o + %${name} =l loadl %${name}.p` + } + + var sw = function(name, fp_var, slot, val) { + return ` %${name}.o =l shl ${slot}, 3 + %${name}.p =l add ${fp_var}, %${name}.o + storel ${val}, %${name}.p` + } + + // --- Allocating tail: refresh fp, write result, return fp --- + var alloc_tail = function(result_var) { + return ` %fp2 =l call $cell_rt_refresh_fp_checked(l %ctx) + jnz %fp2, @ok, @exc +@ok +${sw("w", "%fp2", "%dest", result_var)} + ret %fp2 +@exc + ret 0` + } + + // --- Allocating tail without dest write --- + var alloc_tail_nw = function() { + return ` %fp2 =l call $cell_rt_refresh_fp_checked(l %ctx) + jnz %fp2, @ok, @exc +@ok + ret %fp2 +@exc + ret 0` + } + + // ============================================================ + // Category A: Pure QBE helpers (no C calls) + // ============================================================ + + // move + h[] = `export function $__move_ss(l %fp, l %dest, l %src) { +@entry +${sr("a", "%src")} +${sw("w", "%fp", "%dest", "%a")} + ret +}` + + // int comparisons + var int_ops = [ + ["eq_int", "ceqw"], ["ne_int", "cnew"], ["lt_int", "csltw"], + ["le_int", "cslew"], ["gt_int", "csgtw"], ["ge_int", "csgew"] + ] + var i = 0 + while (i < length(int_ops)) { + h[] = `export function $__${int_ops[i][0]}_ss(l %fp, l %dest, l %s1, l %s2) { +@entry +${sr("a", "%s1")} +${sr("b", "%s2")} + %ia =l sar %a, 1 + %ib =l sar %b, 1 + %iaw =w copy %ia + %ibw =w copy %ib + %cr =w ${int_ops[i][1]} %iaw, %ibw + %crext =l extuw %cr + %sh =l shl %crext, 5 + %r =l or %sh, 3 +${sw("w", "%fp", "%dest", "%r")} + ret +}` + i = i + 1 + } + + // bool comparisons + h[] = `export function $__eq_bool_ss(l %fp, l %dest, l %s1, l %s2) { +@entry +${sr("a", "%s1")} +${sr("b", "%s2")} + %cr =w ceql %a, 
%b + %crext =l extuw %cr + %sh =l shl %crext, 5 + %r =l or %sh, 3 +${sw("w", "%fp", "%dest", "%r")} + ret +}` + + h[] = `export function $__ne_bool_ss(l %fp, l %dest, l %s1, l %s2) { +@entry +${sr("a", "%s1")} +${sr("b", "%s2")} + %cr =w cnel %a, %b + %crext =l extuw %cr + %sh =l shl %crext, 5 + %r =l or %sh, 3 +${sw("w", "%fp", "%dest", "%r")} + ret +}` + + // is_identical (same as eq_bool) + h[] = `export function $__is_identical_ss(l %fp, l %dest, l %s1, l %s2) { +@entry +${sr("a", "%s1")} +${sr("b", "%s2")} + %cr =w ceql %a, %b + %crext =l extuw %cr + %sh =l shl %crext, 5 + %r =l or %sh, 3 +${sw("w", "%fp", "%dest", "%r")} + ret +}` + + // is_int: (val & 1) == 0 + h[] = `export function $__is_int_ss(l %fp, l %dest, l %src) { +@entry +${sr("a", "%src")} + %t =l and %a, 1 + %cr =w ceql %t, 0 + %crext =l extuw %cr + %sh =l shl %crext, 5 + %r =l or %sh, 3 +${sw("w", "%fp", "%dest", "%r")} + ret +}` + + // is_null: (val & 31) == 7 + h[] = `export function $__is_null_ss(l %fp, l %dest, l %src) { +@entry +${sr("a", "%src")} + %t =l and %a, 31 + %cr =w ceql %t, 7 + %crext =l extuw %cr + %sh =l shl %crext, 5 + %r =l or %sh, 3 +${sw("w", "%fp", "%dest", "%r")} + ret +}` + + // is_bool: (val & 31) == 3 + h[] = `export function $__is_bool_ss(l %fp, l %dest, l %src) { +@entry +${sr("a", "%src")} + %t =l and %a, 31 + %cr =w ceql %t, 3 + %crext =l extuw %cr + %sh =l shl %crext, 5 + %r =l or %sh, 3 +${sw("w", "%fp", "%dest", "%r")} + ret +}` + + // is_num: (val & 1 == 0) || (val & 7 == 5) + h[] = `export function $__is_num_ss(l %fp, l %dest, l %src) { +@entry +${sr("a", "%src")} + %t1 =l and %a, 1 + %ii =w ceql %t1, 0 + %t2 =l and %a, 7 + %fi =w ceql %t2, 5 + %cr =w or %ii, %fi + %crext =l extuw %cr + %sh =l shl %crext, 5 + %r =l or %sh, 3 +${sw("w", "%fp", "%dest", "%r")} + ret +}` + + // ============================================================ + // Category B: Non-allocating C call helpers + // ============================================================ + + // Type 
checks via C (no ctx needed except is_proxy) + var tc_ops = [ + ["is_text", "JS_IsText", false], + ["is_array", "JS_IsArray", false], + ["is_func", "JS_IsFunction", false], + ["is_record", "JS_IsRecord", false], + ["is_stone", "JS_IsStone", false], + ["is_proxy", "cell_rt_is_proxy", true] + ] + var tc_name = null + var tc_cfn = null + var tc_ctx = null + i = 0 + while (i < length(tc_ops)) { + tc_name = tc_ops[i][0] + tc_cfn = tc_ops[i][1] + tc_ctx = tc_ops[i][2] + if (tc_ctx) { + h[] = `export function $__${tc_name}_ss(l %ctx, l %fp, l %dest, l %src) { +@entry +${sr("a", "%src")} + %cr =w call $${tc_cfn}(l %ctx, l %a) + %crext =l extuw %cr + %sh =l shl %crext, 5 + %r =l or %sh, 3 +${sw("w", "%fp", "%dest", "%r")} + ret +}` + } else { + h[] = `export function $__${tc_name}_ss(l %fp, l %dest, l %src) { +@entry +${sr("a", "%src")} + %cr =w call $${tc_cfn}(l %a) + %crext =l extuw %cr + %sh =l shl %crext, 5 + %r =l or %sh, 3 +${sw("w", "%fp", "%dest", "%r")} + ret +}` + } + i = i + 1 + } + + // Float comparisons: call qbe_float_cmp(ctx, op_id, a, b) → w, tag + var fc_ops = [ + ["eq_float", 0], ["ne_float", 1], ["lt_float", 2], + ["le_float", 3], ["gt_float", 4], ["ge_float", 5] + ] + i = 0 + while (i < length(fc_ops)) { + h[] = `export function $__${fc_ops[i][0]}_ss(l %ctx, l %fp, l %dest, l %s1, l %s2) { +@entry +${sr("a", "%s1")} +${sr("b", "%s2")} + %cr =w call $qbe_float_cmp(l %ctx, w ${fc_ops[i][1]}, l %a, l %b) + %crext =l extuw %cr + %sh =l shl %crext, 5 + %r =l or %sh, 3 +${sw("w", "%fp", "%dest", "%r")} + ret +}` + i = i + 1 + } + + // Text comparisons: eq/ne via js_string_compare_value, others via cell_rt_* + var txcmp_sv = [ + ["eq_text", "ceqw", 1], ["ne_text", "cnew", 1] + ] + i = 0 + while (i < length(txcmp_sv)) { + h[] = `export function $__${txcmp_sv[i][0]}_ss(l %ctx, l %fp, l %dest, l %s1, l %s2) { +@entry +${sr("a", "%s1")} +${sr("b", "%s2")} + %scmp =w call $js_string_compare_value(l %ctx, l %a, l %b, w ${txcmp_sv[i][2]}) + %cr =w ${txcmp_sv[i][1]} 
%scmp, 0 + %crext =l extuw %cr + %sh =l shl %crext, 5 + %r =l or %sh, 3 +${sw("w", "%fp", "%dest", "%r")} + ret +}` + i = i + 1 + } + + // lt/le/gt/ge_text via cell_rt_* (return tagged JSValue directly) + var txcmp_rt = ["lt_text", "gt_text", "le_text", "ge_text"] + i = 0 + while (i < length(txcmp_rt)) { + h[] = `export function $__${txcmp_rt[i]}_ss(l %ctx, l %fp, l %dest, l %s1, l %s2) { +@entry +${sr("a", "%s1")} +${sr("b", "%s2")} + %r =l call $cell_rt_${txcmp_rt[i]}(l %ctx, l %a, l %b) +${sw("w", "%fp", "%dest", "%r")} + ret +}` + i = i + 1 + } + + // eq_tol, ne_tol (return tagged JSValue directly) + var tol_ops = ["eq_tol", "ne_tol"] + i = 0 + while (i < length(tol_ops)) { + h[] = `export function $__${tol_ops[i]}_ss(l %ctx, l %fp, l %dest, l %s1, l %s2) { +@entry +${sr("a", "%s1")} +${sr("b", "%s2")} + %r =l call $cell_rt_${tol_ops[i]}(l %ctx, l %a, l %b) +${sw("w", "%fp", "%dest", "%r")} + ret +}` + i = i + 1 + } + + // not: JS_ToBool + negate + tag + h[] = `export function $__not_ss(l %ctx, l %fp, l %dest, l %src) { +@entry +${sr("a", "%src")} + %bval =w call $JS_ToBool(l %ctx, l %a) + %neg =w ceqw %bval, 0 + %nex =l extuw %neg + %sh =l shl %nex, 5 + %r =l or %sh, 3 +${sw("w", "%fp", "%dest", "%r")} + ret +}` + + // and, or (return tagged JSValue directly) + h[] = `export function $__and_ss(l %ctx, l %fp, l %dest, l %s1, l %s2) { +@entry +${sr("a", "%s1")} +${sr("b", "%s2")} + %r =l call $cell_rt_and(l %ctx, l %a, l %b) +${sw("w", "%fp", "%dest", "%r")} + ret +}` + + h[] = `export function $__or_ss(l %ctx, l %fp, l %dest, l %s1, l %s2) { +@entry +${sr("a", "%s1")} +${sr("b", "%s2")} + %r =l call $cell_rt_or(l %ctx, l %a, l %b) +${sw("w", "%fp", "%dest", "%r")} + ret +}` + + // Bitwise unary: bnot + h[] = `export function $__bnot_ss(l %ctx, l %fp, l %dest, l %src) { +@entry +${sr("a", "%src")} + %r =l call $qbe_bnot(l %ctx, l %a) +${sw("w", "%fp", "%dest", "%r")} + ret +}` + + // Bitwise binary ops + var bw_ops = [ + ["band", "qbe_bitwise_and"], ["bor", 
"qbe_bitwise_or"], + ["bxor", "qbe_bitwise_xor"], ["bshl", "qbe_shift_shl"], + ["bshr", "qbe_shift_sar"], ["bushr", "qbe_shift_shr"] + ] + i = 0 + while (i < length(bw_ops)) { + h[] = `export function $__${bw_ops[i][0]}_ss(l %ctx, l %fp, l %dest, l %s1, l %s2) { +@entry +${sr("a", "%s1")} +${sr("b", "%s2")} + %r =l call $${bw_ops[i][1]}(l %ctx, l %a, l %b) +${sw("w", "%fp", "%dest", "%r")} + ret +}` + i = i + 1 + } + + // ============================================================ + // Category C: Allocating helpers (return fp or 0) + // ============================================================ + + // Allocating binary ops: read 2 slots, call C, refresh, write dest + var ab_ops = [ + ["add", "cell_rt_add"], ["sub", "qbe_float_sub"], + ["mul", "qbe_float_mul"], ["div", "qbe_float_div"], + ["mod", "qbe_float_mod"], ["pow", "qbe_float_pow"], + ["concat", "JS_ConcatString"] + ] + i = 0 + while (i < length(ab_ops)) { + h[] = `export function l $__${ab_ops[i][0]}_ss(l %ctx, l %fp, l %dest, l %s1, l %s2) { +@entry +${sr("a", "%s1")} +${sr("b", "%s2")} + %r =l call $${ab_ops[i][1]}(l %ctx, l %a, l %b) +${alloc_tail("%r")} +}` + i = i + 1 + } + + // Allocating unary: negate + h[] = `export function l $__neg_ss(l %ctx, l %fp, l %dest, l %src) { +@entry +${sr("a", "%src")} + %r =l call $qbe_float_neg(l %ctx, l %a) +${alloc_tail("%r")} +}` + + // Property access: load_field(ctx, fp, dest, obj_slot, name_ptr) + h[] = `export function l $__load_field_ss(l %ctx, l %fp, l %dest, l %obj_slot, l %name) { +@entry +${sr("a", "%obj_slot")} + %r =l call $cell_rt_load_field(l %ctx, l %a, l %name) +${alloc_tail("%r")} +}` + + // load_dynamic(ctx, fp, dest, obj_slot, key_slot) + h[] = `export function l $__load_dynamic_ss(l %ctx, l %fp, l %dest, l %obj_slot, l %key_slot) { +@entry +${sr("a", "%obj_slot")} +${sr("b", "%key_slot")} + %r =l call $cell_rt_load_dynamic(l %ctx, l %a, l %b) +${alloc_tail("%r")} +}` + + // load_index(ctx, fp, dest, arr_slot, idx_slot) + h[] = `export function 
l $__load_index_ss(l %ctx, l %fp, l %dest, l %arr_slot, l %idx_slot) { +@entry +${sr("a", "%arr_slot")} +${sr("b", "%idx_slot")} + %r =l call $cell_rt_load_index(l %ctx, l %a, l %b) +${alloc_tail("%r")} +}` + + // store_field(ctx, fp, obj_slot, val_slot, name_ptr) — no dest write + h[] = `export function l $__store_field_ss(l %ctx, l %fp, l %obj_slot, l %val_slot, l %name) { +@entry +${sr("a", "%obj_slot")} +${sr("b", "%val_slot")} + call $cell_rt_store_field(l %ctx, l %b, l %a, l %name) +${alloc_tail_nw()} +}` + + // store_dynamic(ctx, fp, obj_slot, val_slot, key_slot) — no dest write + h[] = `export function l $__store_dynamic_ss(l %ctx, l %fp, l %obj_slot, l %val_slot, l %key_slot) { +@entry +${sr("a", "%obj_slot")} +${sr("b", "%val_slot")} +${sr("c", "%key_slot")} + call $cell_rt_store_dynamic(l %ctx, l %b, l %a, l %c) +${alloc_tail_nw()} +}` + + // store_index(ctx, fp, obj_slot, val_slot, idx_slot) — no dest write + h[] = `export function l $__store_index_ss(l %ctx, l %fp, l %obj_slot, l %val_slot, l %idx_slot) { +@entry +${sr("a", "%obj_slot")} +${sr("b", "%val_slot")} +${sr("c", "%idx_slot")} + call $cell_rt_store_index(l %ctx, l %b, l %a, l %c) +${alloc_tail_nw()} +}` + + // frame(ctx, fp, dest, fn_slot, nargs) + h[] = `export function l $__frame_ss(l %ctx, l %fp, l %dest, l %fn_slot, l %nargs) { +@entry +${sr("a", "%fn_slot")} + %r =l call $cell_rt_frame(l %ctx, l %a, l %nargs) +${alloc_tail("%r")} +}` + + // goframe(ctx, fp, dest, fn_slot, nargs) + h[] = `export function l $__goframe_ss(l %ctx, l %fp, l %dest, l %fn_slot, l %nargs) { +@entry +${sr("a", "%fn_slot")} + %r =l call $cell_rt_goframe(l %ctx, l %a, l %nargs) +${alloc_tail("%r")} +}` + + // invoke(ctx, fp, frame_slot, result_slot) — two checks: exc + refresh + h[] = `export function l $__invoke_ss(l %ctx, l %fp, l %frame_slot, l %result_slot) { +@entry +${sr("a", "%frame_slot")} + %r =l call $cell_rt_invoke(l %ctx, l %a) + %is_exc =w ceql %r, 15 + jnz %is_exc, @exc, @ok1 +@ok1 + %fp2 =l call 
$cell_rt_refresh_fp_checked(l %ctx) + jnz %fp2, @ok2, @exc +@ok2 +${sw("w", "%fp2", "%result_slot", "%r")} + ret %fp2 +@exc + ret 0 +}` + + // function(ctx, fp, dest, fn_idx) + h[] = `export function l $__function_ss(l %ctx, l %fp, l %dest, l %fn_idx) { +@entry + %r =l call $cell_rt_make_function(l %ctx, l %fn_idx, l %fp) +${alloc_tail("%r")} +}` + + // new_record(ctx, fp, dest) + h[] = `export function l $__new_record_ss(l %ctx, l %fp, l %dest) { +@entry + %r =l call $JS_NewObject(l %ctx) +${alloc_tail("%r")} +}` + + // new_array(ctx, fp, dest) + h[] = `export function l $__new_array_ss(l %ctx, l %fp, l %dest) { +@entry + %r =l call $JS_NewArray(l %ctx) +${alloc_tail("%r")} +}` + + // new_string(ctx, fp, dest, str_ptr) + h[] = `export function l $__new_string_ss(l %ctx, l %fp, l %dest, l %str_ptr) { +@entry + %r =l call $qbe_new_string(l %ctx, l %str_ptr) +${alloc_tail("%r")} +}` + + // new_float64(ctx, fp, dest, val) — val is a double + h[] = `export function l $__new_float64_ss(l %ctx, l %fp, l %dest, d %val) { +@entry + %r =l call $qbe_new_float64(l %ctx, d %val) +${alloc_tail("%r")} +}` + + // get_intrinsic(ctx, fp, dest, name_ptr) + h[] = `export function l $__get_intrinsic_ss(l %ctx, l %fp, l %dest, l %name_ptr) { +@entry + %r =l call $cell_rt_get_intrinsic(l %ctx, l %name_ptr) +${alloc_tail("%r")} +}` + + // push(ctx, fp, arr_slot, val_slot) — no dest write + h[] = `export function l $__push_ss(l %ctx, l %fp, l %arr_slot, l %val_slot) { +@entry +${sr("a", "%arr_slot")} +${sr("b", "%val_slot")} + call $cell_rt_push(l %ctx, l %a, l %b) +${alloc_tail_nw()} +}` + + // pop(ctx, fp, dest, arr_slot) + h[] = `export function l $__pop_ss(l %ctx, l %fp, l %dest, l %arr_slot) { +@entry +${sr("a", "%arr_slot")} + %r =l call $cell_rt_pop(l %ctx, l %a) +${alloc_tail("%r")} +}` + + // length(ctx, fp, dest, src) + h[] = `export function l $__length_ss(l %ctx, l %fp, l %dest, l %src) { +@entry +${sr("a", "%src")} + %r =l call $JS_CellLength(l %ctx, l %a) 
+${alloc_tail("%r")} +}` + + // delete_field(ctx, fp, dest, obj_slot, name_ptr) + h[] = `export function l $__delete_field_ss(l %ctx, l %fp, l %dest, l %obj_slot, l %name) { +@entry +${sr("a", "%obj_slot")} + %r =l call $cell_rt_delete(l %ctx, l %a, l %name) +${alloc_tail("%r")} +}` + + // delete_dynamic(ctx, fp, dest, obj_slot, key_slot) + h[] = `export function l $__delete_dynamic_ss(l %ctx, l %fp, l %dest, l %obj_slot, l %key_slot) { +@entry +${sr("a", "%obj_slot")} +${sr("b", "%key_slot")} + %r =l call $cell_rt_delete(l %ctx, l %a, l %b) +${alloc_tail("%r")} +}` + + // in(ctx, fp, dest, key_slot, obj_slot) + h[] = `export function l $__in_ss(l %ctx, l %fp, l %dest, l %key_slot, l %obj_slot) { +@entry +${sr("a", "%key_slot")} +${sr("b", "%obj_slot")} + %r =l call $cell_rt_in(l %ctx, l %a, l %b) +${alloc_tail("%r")} +}` + + // regexp(ctx, fp, dest, pat_ptr, flg_ptr) + h[] = `export function l $__regexp_ss(l %ctx, l %fp, l %dest, l %pat, l %flg) { +@entry + %r =l call $cell_rt_regexp(l %ctx, l %pat, l %flg) +${alloc_tail("%r")} +}` + + return h +} + var qbe_emit = function(ir, qbe, export_name) { var out = [] var data_out = [] @@ -142,6 +716,18 @@ var qbe_emit = function(ir, qbe, export_name) { emit(`@${exc}_ok`) } + // Exception check after allocating helper call (helper returns fp or 0) + var emit_exc_check = function() { + var lbl = fresh() + if (has_handler && !in_handler) { + emit(` jnz %fp, @${lbl}_ok, @disruption_handler`) + } else { + needs_exc_ret = true + emit(` jnz %fp, @${lbl}_ok, @_exc_ret`) + } + emit(`@${lbl}_ok`) + } + // Walk instructions var last_was_term = false i = 0 @@ -202,41 +788,35 @@ var qbe_emit = function(ir, qbe, export_name) { continue } if (op == "access") { - p = fresh() if (is_number(a2)) { if (is_integer(a2)) { s_write(a1, text(a2 * 2)) } else { - emit(` %${p} =l call $qbe_new_float64(l %ctx, d d_${text(a2)})`) - refresh_fp() - s_write(a1, `%${p}`) + emit(` %fp =l call $__new_float64_ss(l %ctx, l %fp, l ${text(a1)}, d 
d_${text(a2)})`) + emit_exc_check() } } else if (is_text(a2)) { sl = intern_str(a2) - emit(` %${p} =l call $qbe_new_string(l %ctx, l ${sl})`) - refresh_fp() - s_write(a1, `%${p}`) + emit(` %fp =l call $__new_string_ss(l %ctx, l %fp, l ${text(a1)}, l ${sl})`) + emit_exc_check() } else if (is_object(a2)) { if (a2.make == "intrinsic") { sl = intern_str(a2.name) - emit(` %${p} =l call $cell_rt_get_intrinsic(l %ctx, l ${sl})`) - refresh_fp() - s_write(a1, `%${p}`) + emit(` %fp =l call $__get_intrinsic_ss(l %ctx, l %fp, l ${text(a1)}, l ${sl})`) + emit_exc_check() } else if (a2.kind == "number") { if (a2.number != null && is_integer(a2.number)) { s_write(a1, text(a2.number * 2)) } else if (a2.number != null) { - emit(` %${p} =l call $qbe_new_float64(l %ctx, d d_${text(a2.number)})`) - refresh_fp() - s_write(a1, `%${p}`) + emit(` %fp =l call $__new_float64_ss(l %ctx, l %fp, l ${text(a1)}, d d_${text(a2.number)})`) + emit_exc_check() } else { s_write(a1, text(qbe.js_null)) } } else if (a2.kind == "text") { sl = intern_str(a2.value) - emit(` %${p} =l call $qbe_new_string(l %ctx, l ${sl})`) - refresh_fp() - s_write(a1, `%${p}`) + emit(` %fp =l call $__new_string_ss(l %ctx, l %fp, l ${text(a1)}, l ${sl})`) + emit_exc_check() } else if (a2.kind == "true") { s_write(a1, text(qbe.js_true)) } else if (a2.kind == "false") { @@ -255,507 +835,285 @@ var qbe_emit = function(ir, qbe, export_name) { // --- Movement --- if (op == "move") { - v = s_read(a2) - s_write(a1, v) + emit(` call $__move_ss(l %fp, l ${text(a1)}, l ${text(a2)})`) continue } // --- Generic arithmetic (VM dispatches int/float) --- if (op == "add") { - lhs = s_read(a2) - rhs = s_read(a3) - p = fresh() - emit(` %${p} =l call $cell_rt_add(l %ctx, l ${lhs}, l ${rhs})`) - refresh_fp() - s_write(a1, `%${p}`) + emit(` %fp =l call $__add_ss(l %ctx, l %fp, l ${text(a1)}, l ${text(a2)}, l ${text(a3)})`) + emit_exc_check() continue } if (op == "subtract") { - lhs = s_read(a2) - rhs = s_read(a3) - p = fresh() - emit(qbe.sub(p, 
"%ctx", lhs, rhs)) - refresh_fp() - s_write(a1, `%${p}`) + emit(` %fp =l call $__sub_ss(l %ctx, l %fp, l ${text(a1)}, l ${text(a2)}, l ${text(a3)})`) + emit_exc_check() continue } if (op == "multiply") { - lhs = s_read(a2) - rhs = s_read(a3) - p = fresh() - emit(qbe.mul(p, "%ctx", lhs, rhs)) - refresh_fp() - s_write(a1, `%${p}`) + emit(` %fp =l call $__mul_ss(l %ctx, l %fp, l ${text(a1)}, l ${text(a2)}, l ${text(a3)})`) + emit_exc_check() continue } if (op == "divide") { - lhs = s_read(a2) - rhs = s_read(a3) - p = fresh() - emit(qbe.div(p, "%ctx", lhs, rhs)) - refresh_fp() - s_write(a1, `%${p}`) + emit(` %fp =l call $__div_ss(l %ctx, l %fp, l ${text(a1)}, l ${text(a2)}, l ${text(a3)})`) + emit_exc_check() continue } if (op == "modulo") { - lhs = s_read(a2) - rhs = s_read(a3) - p = fresh() - emit(qbe.mod(p, "%ctx", lhs, rhs)) - refresh_fp() - s_write(a1, `%${p}`) + emit(` %fp =l call $__mod_ss(l %ctx, l %fp, l ${text(a1)}, l ${text(a2)}, l ${text(a3)})`) + emit_exc_check() continue } if (op == "negate") { - v = s_read(a2) - p = fresh() - emit(qbe.neg(p, "%ctx", v)) - refresh_fp() - s_write(a1, `%${p}`) + emit(` %fp =l call $__neg_ss(l %ctx, l %fp, l ${text(a1)}, l ${text(a2)})`) + emit_exc_check() continue } if (op == "pow") { - lhs = s_read(a2) - rhs = s_read(a3) - p = fresh() - emit(` %${p} =l call $qbe_float_pow(l %ctx, l ${lhs}, l ${rhs})`) - refresh_fp() - s_write(a1, `%${p}`) + emit(` %fp =l call $__pow_ss(l %ctx, l %fp, l ${text(a1)}, l ${text(a2)}, l ${text(a3)})`) + emit_exc_check() continue } // --- String concat --- if (op == "concat") { - lhs = s_read(a2) - rhs = s_read(a3) - p = fresh() - emit(qbe.concat(p, "%ctx", lhs, rhs)) - refresh_fp() - s_write(a1, `%${p}`) + emit(` %fp =l call $__concat_ss(l %ctx, l %fp, l ${text(a1)}, l ${text(a2)}, l ${text(a3)})`) + emit_exc_check() continue } // --- Type checks — use qbe.cm macros (no GC, no refresh) --- if (op == "is_int") { - v = s_read(a2) - p = fresh() - emit(qbe.is_int(p, v)) - emit(qbe.new_bool(p + 
".r", "%" + p)) - s_write(a1, `%${p}.r`) + emit(` call $__is_int_ss(l %fp, l ${text(a1)}, l ${text(a2)})`) continue } if (op == "is_text") { - v = s_read(a2) - p = fresh() - emit(` %${p} =w call $JS_IsText(l ${v})`) - emit(qbe.new_bool(p + ".r", "%" + p)) - s_write(a1, `%${p}.r`) + emit(` call $__is_text_ss(l %fp, l ${text(a1)}, l ${text(a2)})`) continue } if (op == "is_num") { - v = s_read(a2) - p = fresh() - emit(qbe.is_number(p, v)) - emit(qbe.new_bool(p + ".r", "%" + p)) - s_write(a1, `%${p}.r`) + emit(` call $__is_num_ss(l %fp, l ${text(a1)}, l ${text(a2)})`) continue } if (op == "is_bool") { - v = s_read(a2) - p = fresh() - emit(qbe.is_bool(p, v)) - emit(qbe.new_bool(p + ".r", "%" + p)) - s_write(a1, `%${p}.r`) + emit(` call $__is_bool_ss(l %fp, l ${text(a1)}, l ${text(a2)})`) continue } if (op == "is_null") { - v = s_read(a2) - p = fresh() - emit(qbe.is_null(p, v)) - emit(qbe.new_bool(p + ".r", "%" + p)) - s_write(a1, `%${p}.r`) + emit(` call $__is_null_ss(l %fp, l ${text(a1)}, l ${text(a2)})`) continue } if (op == "is_identical") { - lhs = s_read(a2) - rhs = s_read(a3) - p = fresh() - emit(qbe.is_identical(p, lhs, rhs)) - s_write(a1, `%${p}`) + emit(` call $__is_identical_ss(l %fp, l ${text(a1)}, l ${text(a2)}, l ${text(a3)})`) continue } if (op == "is_array") { - v = s_read(a2) - p = fresh() - emit(` %${p} =w call $JS_IsArray(l ${v})`) - emit(qbe.new_bool(p + ".r", "%" + p)) - s_write(a1, `%${p}.r`) + emit(` call $__is_array_ss(l %fp, l ${text(a1)}, l ${text(a2)})`) continue } if (op == "is_func") { - v = s_read(a2) - p = fresh() - emit(` %${p} =w call $JS_IsFunction(l ${v})`) - emit(qbe.new_bool(p + ".r", "%" + p)) - s_write(a1, `%${p}.r`) + emit(` call $__is_func_ss(l %fp, l ${text(a1)}, l ${text(a2)})`) continue } if (op == "is_record") { - v = s_read(a2) - p = fresh() - emit(` %${p} =w call $JS_IsRecord(l ${v})`) - emit(qbe.new_bool(p + ".r", "%" + p)) - s_write(a1, `%${p}.r`) + emit(` call $__is_record_ss(l %fp, l ${text(a1)}, l ${text(a2)})`) 
continue } if (op == "is_stone") { - v = s_read(a2) - p = fresh() - emit(` %${p} =w call $JS_IsStone(l ${v})`) - emit(qbe.new_bool(p + ".r", "%" + p)) - s_write(a1, `%${p}.r`) + emit(` call $__is_stone_ss(l %fp, l ${text(a1)}, l ${text(a2)})`) continue } if (op == "is_proxy") { - v = s_read(a2) - p = fresh() - emit(` %${p} =w call $cell_rt_is_proxy(l %ctx, l ${v})`) - emit(qbe.new_bool(p + ".r", "%" + p)) - s_write(a1, `%${p}.r`) + emit(` call $__is_proxy_ss(l %ctx, l %fp, l ${text(a1)}, l ${text(a2)})`) continue } // --- Comparisons (int path, no GC) --- if (op == "eq_int") { - lhs = s_read(a2) - rhs = s_read(a3) - p = fresh() - emit(qbe.eq_int(p, "%ctx", lhs, rhs)) - s_write(a1, `%${p}`) + emit(` call $__eq_int_ss(l %fp, l ${text(a1)}, l ${text(a2)}, l ${text(a3)})`) continue } if (op == "ne_int") { - lhs = s_read(a2) - rhs = s_read(a3) - p = fresh() - emit(qbe.ne_int(p, "%ctx", lhs, rhs)) - s_write(a1, `%${p}`) + emit(` call $__ne_int_ss(l %fp, l ${text(a1)}, l ${text(a2)}, l ${text(a3)})`) continue } if (op == "lt_int") { - lhs = s_read(a2) - rhs = s_read(a3) - p = fresh() - emit(qbe.lt_int(p, "%ctx", lhs, rhs)) - s_write(a1, `%${p}`) + emit(` call $__lt_int_ss(l %fp, l ${text(a1)}, l ${text(a2)}, l ${text(a3)})`) continue } if (op == "gt_int") { - lhs = s_read(a2) - rhs = s_read(a3) - p = fresh() - emit(qbe.gt_int(p, "%ctx", lhs, rhs)) - s_write(a1, `%${p}`) + emit(` call $__gt_int_ss(l %fp, l ${text(a1)}, l ${text(a2)}, l ${text(a3)})`) continue } if (op == "le_int") { - lhs = s_read(a2) - rhs = s_read(a3) - p = fresh() - emit(qbe.le_int(p, "%ctx", lhs, rhs)) - s_write(a1, `%${p}`) + emit(` call $__le_int_ss(l %fp, l ${text(a1)}, l ${text(a2)}, l ${text(a3)})`) continue } if (op == "ge_int") { - lhs = s_read(a2) - rhs = s_read(a3) - p = fresh() - emit(qbe.ge_int(p, "%ctx", lhs, rhs)) - s_write(a1, `%${p}`) + emit(` call $__ge_int_ss(l %fp, l ${text(a1)}, l ${text(a2)}, l ${text(a3)})`) continue } // --- Comparisons (float/text/bool) --- if (op == "eq_float") 
{ - lhs = s_read(a2) - rhs = s_read(a3) - p = fresh() - emit(qbe.eq_float(p, "%ctx", lhs, rhs)) - s_write(a1, `%${p}`) + emit(` call $__eq_float_ss(l %ctx, l %fp, l ${text(a1)}, l ${text(a2)}, l ${text(a3)})`) continue } if (op == "ne_float") { - lhs = s_read(a2) - rhs = s_read(a3) - p = fresh() - emit(qbe.ne_float(p, "%ctx", lhs, rhs)) - s_write(a1, `%${p}`) + emit(` call $__ne_float_ss(l %ctx, l %fp, l ${text(a1)}, l ${text(a2)}, l ${text(a3)})`) continue } if (op == "lt_float") { - lhs = s_read(a2) - rhs = s_read(a3) - p = fresh() - emit(qbe.lt_float(p, "%ctx", lhs, rhs)) - s_write(a1, `%${p}`) + emit(` call $__lt_float_ss(l %ctx, l %fp, l ${text(a1)}, l ${text(a2)}, l ${text(a3)})`) continue } if (op == "le_float") { - lhs = s_read(a2) - rhs = s_read(a3) - p = fresh() - emit(qbe.le_float(p, "%ctx", lhs, rhs)) - s_write(a1, `%${p}`) + emit(` call $__le_float_ss(l %ctx, l %fp, l ${text(a1)}, l ${text(a2)}, l ${text(a3)})`) continue } if (op == "gt_float") { - lhs = s_read(a2) - rhs = s_read(a3) - p = fresh() - emit(qbe.gt_float(p, "%ctx", lhs, rhs)) - s_write(a1, `%${p}`) + emit(` call $__gt_float_ss(l %ctx, l %fp, l ${text(a1)}, l ${text(a2)}, l ${text(a3)})`) continue } if (op == "ge_float") { - lhs = s_read(a2) - rhs = s_read(a3) - p = fresh() - emit(qbe.ge_float(p, "%ctx", lhs, rhs)) - s_write(a1, `%${p}`) + emit(` call $__ge_float_ss(l %ctx, l %fp, l ${text(a1)}, l ${text(a2)}, l ${text(a3)})`) continue } if (op == "eq_text") { - lhs = s_read(a2) - rhs = s_read(a3) - p = fresh() - emit(qbe.eq_text(p, "%ctx", lhs, rhs)) - s_write(a1, `%${p}`) + emit(` call $__eq_text_ss(l %ctx, l %fp, l ${text(a1)}, l ${text(a2)}, l ${text(a3)})`) continue } if (op == "ne_text") { - lhs = s_read(a2) - rhs = s_read(a3) - p = fresh() - emit(qbe.ne_text(p, "%ctx", lhs, rhs)) - s_write(a1, `%${p}`) + emit(` call $__ne_text_ss(l %ctx, l %fp, l ${text(a1)}, l ${text(a2)}, l ${text(a3)})`) continue } if (op == "lt_text" || op == "gt_text" || op == "le_text" || op == "ge_text") { - 
lhs = s_read(a2) - rhs = s_read(a3) - p = fresh() - emit(` %${p} =l call $cell_rt_${op}(l %ctx, l ${lhs}, l ${rhs})`) - s_write(a1, `%${p}`) + emit(` call $__${op}_ss(l %ctx, l %fp, l ${text(a1)}, l ${text(a2)}, l ${text(a3)})`) continue } if (op == "eq_bool") { - lhs = s_read(a2) - rhs = s_read(a3) - p = fresh() - emit(qbe.eq_bool(p, lhs, rhs)) - s_write(a1, `%${p}`) + emit(` call $__eq_bool_ss(l %fp, l ${text(a1)}, l ${text(a2)}, l ${text(a3)})`) continue } if (op == "ne_bool") { - lhs = s_read(a2) - rhs = s_read(a3) - p = fresh() - emit(qbe.ne_bool(p, lhs, rhs)) - s_write(a1, `%${p}`) + emit(` call $__ne_bool_ss(l %fp, l ${text(a1)}, l ${text(a2)}, l ${text(a3)})`) continue } if (op == "eq_tol" || op == "ne_tol") { - lhs = s_read(a2) - rhs = s_read(a3) - p = fresh() - emit(` %${p} =l call $cell_rt_${op}(l %ctx, l ${lhs}, l ${rhs})`) - s_write(a1, `%${p}`) + emit(` call $__${op}_ss(l %ctx, l %fp, l ${text(a1)}, l ${text(a2)}, l ${text(a3)})`) continue } // --- Boolean ops --- if (op == "not") { - v = s_read(a2) - p = fresh() - emit(qbe.lnot(p, "%ctx", v)) - s_write(a1, `%${p}`) + emit(` call $__not_ss(l %ctx, l %fp, l ${text(a1)}, l ${text(a2)})`) continue } if (op == "and") { - lhs = s_read(a2) - rhs = s_read(a3) - p = fresh() - emit(` %${p} =l call $cell_rt_and(l %ctx, l ${lhs}, l ${rhs})`) - s_write(a1, `%${p}`) + emit(` call $__and_ss(l %ctx, l %fp, l ${text(a1)}, l ${text(a2)}, l ${text(a3)})`) continue } if (op == "or") { - lhs = s_read(a2) - rhs = s_read(a3) - p = fresh() - emit(` %${p} =l call $cell_rt_or(l %ctx, l ${lhs}, l ${rhs})`) - s_write(a1, `%${p}`) + emit(` call $__or_ss(l %ctx, l %fp, l ${text(a1)}, l ${text(a2)}, l ${text(a3)})`) continue } // --- Bitwise ops — use qbe.cm macros (no GC) --- if (op == "bitnot") { - v = s_read(a2) - p = fresh() - emit(qbe.bnot(p, "%ctx", v)) - s_write(a1, `%${p}`) + emit(` call $__bnot_ss(l %ctx, l %fp, l ${text(a1)}, l ${text(a2)})`) continue } if (op == "bitand") { - lhs = s_read(a2) - rhs = s_read(a3) - p = 
fresh() - emit(qbe.band(p, "%ctx", lhs, rhs)) - s_write(a1, `%${p}`) + emit(` call $__band_ss(l %ctx, l %fp, l ${text(a1)}, l ${text(a2)}, l ${text(a3)})`) continue } if (op == "bitor") { - lhs = s_read(a2) - rhs = s_read(a3) - p = fresh() - emit(qbe.bor(p, "%ctx", lhs, rhs)) - s_write(a1, `%${p}`) + emit(` call $__bor_ss(l %ctx, l %fp, l ${text(a1)}, l ${text(a2)}, l ${text(a3)})`) continue } if (op == "bitxor") { - lhs = s_read(a2) - rhs = s_read(a3) - p = fresh() - emit(qbe.bxor(p, "%ctx", lhs, rhs)) - s_write(a1, `%${p}`) + emit(` call $__bxor_ss(l %ctx, l %fp, l ${text(a1)}, l ${text(a2)}, l ${text(a3)})`) continue } if (op == "shl") { - lhs = s_read(a2) - rhs = s_read(a3) - p = fresh() - emit(qbe.shl(p, "%ctx", lhs, rhs)) - s_write(a1, `%${p}`) + emit(` call $__bshl_ss(l %ctx, l %fp, l ${text(a1)}, l ${text(a2)}, l ${text(a3)})`) continue } if (op == "shr") { - lhs = s_read(a2) - rhs = s_read(a3) - p = fresh() - emit(qbe.shr(p, "%ctx", lhs, rhs)) - s_write(a1, `%${p}`) + emit(` call $__bshr_ss(l %ctx, l %fp, l ${text(a1)}, l ${text(a2)}, l ${text(a3)})`) continue } if (op == "ushr") { - lhs = s_read(a2) - rhs = s_read(a3) - p = fresh() - emit(qbe.ushr(p, "%ctx", lhs, rhs)) - s_write(a1, `%${p}`) + emit(` call $__bushr_ss(l %ctx, l %fp, l ${text(a1)}, l ${text(a2)}, l ${text(a3)})`) continue } // --- Property access — runtime calls [G] --- if (op == "load_field") { - v = s_read(a2) - pn = null - if (is_text(a3)) pn = a3 - else if (is_object(a3) && a3.name != null) pn = a3.name - else if (is_object(a3) && a3.value != null) pn = a3.value - p = fresh() + pn = prop_name(a3) if (pn != null) { sl = intern_str(pn) - emit(` %${p} =l call $cell_rt_load_field(l %ctx, l ${v}, l ${sl})`) + emit(` %fp =l call $__load_field_ss(l %ctx, l %fp, l ${text(a1)}, l ${text(a2)}, l ${sl})`) } else { - lhs = s_read(a3) - emit(` %${p} =l call $cell_rt_load_dynamic(l %ctx, l ${v}, l ${lhs})`) + emit(` %fp =l call $__load_dynamic_ss(l %ctx, l %fp, l ${text(a1)}, l ${text(a2)}, l 
${text(a3)})`) } - refresh_fp() - s_write(a1, `%${p}`) + emit_exc_check() continue } if (op == "load_index") { - lhs = s_read(a2) - rhs = s_read(a3) - p = fresh() - emit(` %${p} =l call $cell_rt_load_index(l %ctx, l ${lhs}, l ${rhs})`) - refresh_fp() - s_write(a1, `%${p}`) + emit(` %fp =l call $__load_index_ss(l %ctx, l %fp, l ${text(a1)}, l ${text(a2)}, l ${text(a3)})`) + emit_exc_check() continue } if (op == "load_dynamic") { - v = s_read(a2) - pn = null - if (is_text(a3)) pn = a3 - else if (is_object(a3) && a3.name != null) pn = a3.name - else if (is_object(a3) && a3.value != null) pn = a3.value - p = fresh() + pn = prop_name(a3) if (pn != null) { sl = intern_str(pn) - emit(` %${p} =l call $cell_rt_load_field(l %ctx, l ${v}, l ${sl})`) + emit(` %fp =l call $__load_field_ss(l %ctx, l %fp, l ${text(a1)}, l ${text(a2)}, l ${sl})`) } else { - lhs = s_read(a3) - emit(` %${p} =l call $cell_rt_load_dynamic(l %ctx, l ${v}, l ${lhs})`) + emit(` %fp =l call $__load_dynamic_ss(l %ctx, l %fp, l ${text(a1)}, l ${text(a2)}, l ${text(a3)})`) } - refresh_fp() - s_write(a1, `%${p}`) + emit_exc_check() continue } if (op == "store_field") { - // IR: ["store_field", obj, val, prop] → C: (ctx, val, obj, name) - obj = s_read(a1) - v = s_read(a2) - pn = null - if (is_text(a3)) { - pn = a3 - } else if (is_object(a3)) { - if (a3.name != null) pn = a3.name - else if (a3.value != null) pn = a3.value - } + // IR: ["store_field", obj, val, prop] + pn = prop_name(a3) if (pn != null) { sl = intern_str(pn) - emit(` call $cell_rt_store_field(l %ctx, l ${v}, l ${obj}, l ${sl})`) + emit(` %fp =l call $__store_field_ss(l %ctx, l %fp, l ${text(a1)}, l ${text(a2)}, l ${sl})`) } else { - lhs = s_read(a3) - emit(` call $cell_rt_store_dynamic(l %ctx, l ${v}, l ${obj}, l ${lhs})`) + emit(` %fp =l call $__store_dynamic_ss(l %ctx, l %fp, l ${text(a1)}, l ${text(a2)}, l ${text(a3)})`) } - refresh_fp() + emit_exc_check() continue } if (op == "store_index") { - // IR: ["store_index", obj, val, idx] → C: 
(ctx, val, obj, idx) - obj = s_read(a1) - v = s_read(a2) - lhs = s_read(a3) - emit(` call $cell_rt_store_index(l %ctx, l ${v}, l ${obj}, l ${lhs})`) - refresh_fp() + // IR: ["store_index", obj, val, idx] + emit(` %fp =l call $__store_index_ss(l %ctx, l %fp, l ${text(a1)}, l ${text(a2)}, l ${text(a3)})`) + emit_exc_check() continue } if (op == "store_dynamic") { - // IR: ["store_dynamic", obj, val, key] → C: (ctx, val, obj, key) - obj = s_read(a1) - v = s_read(a2) - pn = null - if (is_text(a3)) pn = a3 - else if (is_object(a3) && a3.name != null) pn = a3.name - else if (is_object(a3) && a3.value != null) pn = a3.value + // IR: ["store_dynamic", obj, val, key] + pn = prop_name(a3) if (pn != null) { sl = intern_str(pn) - emit(` call $cell_rt_store_field(l %ctx, l ${v}, l ${obj}, l ${sl})`) + emit(` %fp =l call $__store_field_ss(l %ctx, l %fp, l ${text(a1)}, l ${text(a2)}, l ${sl})`) } else { - lhs = s_read(a3) - emit(` call $cell_rt_store_dynamic(l %ctx, l ${v}, l ${obj}, l ${lhs})`) + emit(` %fp =l call $__store_dynamic_ss(l %ctx, l %fp, l ${text(a1)}, l ${text(a2)}, l ${text(a3)})`) } - refresh_fp() + emit_exc_check() continue } @@ -818,11 +1176,8 @@ var qbe_emit = function(ir, qbe, export_name) { // --- Function calls [G] --- if (op == "frame") { - v = s_read(a2) - p = fresh() - emit(` %${p} =l call $cell_rt_frame(l %ctx, l ${v}, l ${text(a3)})`) - refresh_fp() - s_write(a1, `%${p}`) + emit(` %fp =l call $__frame_ss(l %ctx, l %fp, l ${text(a1)}, l ${text(a2)}, l ${text(a3)})`) + emit_exc_check() continue } if (op == "setarg") { @@ -832,45 +1187,18 @@ var qbe_emit = function(ir, qbe, export_name) { continue } if (op == "invoke") { - v = s_read(a1) - p = fresh() - emit(` %${p} =l call $cell_rt_invoke(l %ctx, l ${v})`) - chk = fresh() - emit(` %${chk} =w ceql %${p}, 15`) - if (has_handler) { - emit(` jnz %${chk}, @disruption_handler, @${chk}_ok`) - } else { - needs_exc_ret = true - emit(` jnz %${chk}, @_exc_ret, @${chk}_ok`) - } - emit(`@${chk}_ok`) - refresh_fp() - 
s_write(a2, `%${p}`) + emit(` %fp =l call $__invoke_ss(l %ctx, l %fp, l ${text(a1)}, l ${text(a2)})`) + emit_exc_check() continue } if (op == "tail_invoke") { - v = s_read(a1) - p = fresh() - emit(` %${p} =l call $cell_rt_invoke(l %ctx, l ${v})`) - chk = fresh() - emit(` %${chk} =w ceql %${p}, 15`) - if (has_handler) { - emit(` jnz %${chk}, @disruption_handler, @${chk}_ok`) - } else { - needs_exc_ret = true - emit(` jnz %${chk}, @_exc_ret, @${chk}_ok`) - } - emit(`@${chk}_ok`) - refresh_fp() - s_write(a2, `%${p}`) + emit(` %fp =l call $__invoke_ss(l %ctx, l %fp, l ${text(a1)}, l ${text(a2)})`) + emit_exc_check() continue } if (op == "goframe") { - v = s_read(a2) - p = fresh() - emit(` %${p} =l call $cell_rt_goframe(l %ctx, l ${v}, l ${text(a3)})`) - refresh_fp() - s_write(a1, `%${p}`) + emit(` %fp =l call $__goframe_ss(l %ctx, l %fp, l ${text(a1)}, l ${text(a2)}, l ${text(a3)})`) + emit_exc_check() continue } if (op == "goinvoke") { @@ -897,57 +1225,42 @@ var qbe_emit = function(ir, qbe, export_name) { // --- Function object creation [G] --- if (op == "function") { - p = fresh() - emit(` %${p} =l call $cell_rt_make_function(l %ctx, l ${text(a2)}, l %fp)`) - refresh_fp() - s_write(a1, `%${p}`) + emit(` %fp =l call $__function_ss(l %ctx, l %fp, l ${text(a1)}, l ${text(a2)})`) + emit_exc_check() continue } // --- Record/Array creation [G] --- if (op == "record") { - p = fresh() - emit(` %${p} =l call $JS_NewObject(l %ctx)`) - refresh_fp() - s_write(a1, `%${p}`) + emit(` %fp =l call $__new_record_ss(l %ctx, l %fp, l ${text(a1)})`) + emit_exc_check() continue } if (op == "array") { - // a2 is a size hint; elements are pushed via separate push instructions - p = fresh() - emit(` %${p} =l call $JS_NewArray(l %ctx)`) - refresh_fp() - s_write(a1, `%${p}`) + emit(` %fp =l call $__new_array_ss(l %ctx, l %fp, l ${text(a1)})`) + emit_exc_check() continue } // --- Array push/pop [G] --- if (op == "push") { - lhs = s_read(a1) - rhs = s_read(a2) - emit(` call $cell_rt_push(l %ctx, 
l ${lhs}, l ${rhs})`) - refresh_fp() + emit(` %fp =l call $__push_ss(l %ctx, l %fp, l ${text(a1)}, l ${text(a2)})`) + emit_exc_check() continue } if (op == "pop") { - v = s_read(a2) - p = fresh() - emit(` %${p} =l call $cell_rt_pop(l %ctx, l ${v})`) - refresh_fp() - s_write(a1, `%${p}`) + emit(` %fp =l call $__pop_ss(l %ctx, l %fp, l ${text(a1)}, l ${text(a2)})`) + emit_exc_check() continue } // --- Length [G] --- if (op == "length") { - v = s_read(a2) - p = fresh() - emit(` %${p} =l call $JS_CellLength(l %ctx, l ${v})`) - refresh_fp() - s_write(a1, `%${p}`) + emit(` %fp =l call $__length_ss(l %ctx, l %fp, l ${text(a1)}, l ${text(a2)})`) + emit_exc_check() continue } @@ -970,21 +1283,14 @@ var qbe_emit = function(ir, qbe, export_name) { continue } if (op == "delete") { - v = s_read(a2) - pn = null - if (is_text(a3)) pn = a3 - else if (is_object(a3) && a3.name != null) pn = a3.name - else if (is_object(a3) && a3.value != null) pn = a3.value - p = fresh() + pn = prop_name(a3) if (pn != null) { sl = intern_str(pn) - emit(` %${p} =l call $cell_rt_delete(l %ctx, l ${v}, l ${sl})`) + emit(` %fp =l call $__delete_field_ss(l %ctx, l %fp, l ${text(a1)}, l ${text(a2)}, l ${sl})`) } else { - lhs = s_read(a3) - emit(` %${p} =l call $cell_rt_delete(l %ctx, l ${v}, l ${lhs})`) + emit(` %fp =l call $__delete_dynamic_ss(l %ctx, l %fp, l ${text(a1)}, l ${text(a2)}, l ${text(a3)})`) } - refresh_fp() - s_write(a1, `%${p}`) + emit_exc_check() continue } @@ -992,12 +1298,8 @@ var qbe_emit = function(ir, qbe, export_name) { if (op == "in") { // IR: ["in", dest, key_slot, obj_slot] - lhs = s_read(a2) - rhs = s_read(a3) - p = fresh() - emit(` %${p} =l call $cell_rt_in(l %ctx, l ${lhs}, l ${rhs})`) - refresh_fp() - s_write(a1, `%${p}`) + emit(` %fp =l call $__in_ss(l %ctx, l %fp, l ${text(a1)}, l ${text(a2)}, l ${text(a3)})`) + emit_exc_check() continue } @@ -1007,10 +1309,8 @@ var qbe_emit = function(ir, qbe, export_name) { // IR: ["regexp", dest_slot, pattern_string, flags_string] 
pat_label = intern_str(a2) flg_label = intern_str(a3) - p = fresh() - emit(` %${p} =l call $cell_rt_regexp(l %ctx, l ${pat_label}, l ${flg_label})`) - refresh_fp() - s_write(a1, `%${p}`) + emit(` %fp =l call $__regexp_ss(l %ctx, l %fp, l ${text(a1)}, l ${pat_label}, l ${flg_label})`) + emit_exc_check() continue } @@ -1055,7 +1355,8 @@ var qbe_emit = function(ir, qbe, export_name) { return { data: text(data_out, "\n"), - functions: fn_bodies + functions: fn_bodies, + helpers: emit_helpers(qbe) } }