// // benchmark_wota_nota_json.js // // Usage in QuickJS: // qjs benchmark_wota_nota_json.js // // Ensure wota, nota, json, and os are all available, e.g.: var wota = use('wota'); var nota = use('nota'); var json = use('json'); var jswota = use('jswota') var os = use('os'); // // Parse command line arguments if (arg.length != 2) { log.console('Usage: cell benchmark_wota_nota_json.ce '); $stop() } var lib_name = arg[0]; var scenario_name = arg[1]; //////////////////////////////////////////////////////////////////////////////// // 1. Setup "libraries" array to easily switch among wota, nota, and json //////////////////////////////////////////////////////////////////////////////// def libraries = [ { name: "wota", encode: wota.encode, decode: wota.decode, // wota produces an ArrayBuffer. We'll count `buffer.byteLength` as size. getSize(encoded) { return encoded.length; } }, { name: "nota", encode: nota.encode, decode: nota.decode, // nota also produces an ArrayBuffer: getSize(encoded) { return encoded.length; } }, { name: "json", encode: json.encode, decode: json.decode, // json produces a JS string. We'll measure its UTF-16 code unit length // as a rough "size". Alternatively, you could convert to UTF-8 for // a more accurate byte size. Here we just use `string.length`. getSize(encodedStr) { return encodedStr.length; } } ]; //////////////////////////////////////////////////////////////////////////////// // 2. Test data sets (similar to wota benchmarks). 
// Each scenario has { name, data, iterations }
////////////////////////////////////////////////////////////////////////////////
def benchmarks = [
    { name: "empty", data: [{}, {}, {}, {}], iterations: 10000 },
    { name: "integers", data: [0, 42, -1, 2023], iterations: 100000 },
    { name: "floats", data: [0.1, 1e-50, 3.14159265359], iterations: 100000 },
    // Includes multi-code-unit emoji to exercise non-ASCII string encoding.
    { name: "strings", data: ["Hello, wota!", "short", "Emoji: \u{1f600}\u{1f64f}"], iterations: 100000 },
    { name: "objects", data: [ { a:1, b:2.2, c:"3", d:false }, { x:42, y:null, z:"test" } ], iterations: 50000 },
    { name: "nested", data: [ [ [ [1,2], [3,4] ] ], [[[]]], [1, [2, [3, [4]]]] ], iterations: 50000 },
    // One 1000-element array; far fewer iterations since each op is heavier.
    { name: "large_array", data: [ Array.from({length:1000}, (_, i) => i) ], iterations: 1000 },
];

////////////////////////////////////////////////////////////////////////////////
// 3. Utility: measureTime(fn) => how long fn() takes in seconds.
//
// NOTE(review): "in seconds" assumes the host's os.now() returns seconds —
// confirm the time unit (and resolution) of this runtime's os module; if it
// actually returns milliseconds, every derived ops/sec and ns/op figure
// below is off by a factor of 1000.
////////////////////////////////////////////////////////////////////////////////
function measureTime(fn) {
    let start = os.now();
    fn();
    let end = os.now();
    return (end - start); // in seconds
}

////////////////////////////////////////////////////////////////////////////////
// 4. For each library, we run each benchmark scenario and measure:
//    - Encoding time (seconds)
//    - Decoding time (seconds)
//    - Total encoded size (bytes or code units for json)
//
////////////////////////////////////////////////////////////////////////////////
function runBenchmarkForLibrary(lib, bench) {
    // We'll encode and decode each item in `bench.data`.
    // We do 'bench.iterations' times. Then sum up total time.

    // Pre-store the encoded results for all items so we can measure decode time
    // in a separate pass. Also measure total size once.
    let encodedList = [];
    let totalSize = 0;

    // 1) Measure ENCODING
    // NOTE(review): the first iteration's bookkeeping (push + getSize) runs
    // inside the timed closure, so it is included in encodeTime; with the
    // iteration counts used here the overhead should be negligible, but it
    // is not strictly pure encode time.
    let encodeTime = measureTime(() => {
        for (let i = 0; i < bench.iterations; i++) {
            // For each data item, encode it
            for (let j = 0; j < bench.data.length; j++) {
                let e = lib.encode(bench.data[j]);
                // store only in the very first iteration, so we can decode them later
                // but do not store them every iteration or we blow up memory.
                if (i == 0) {
                    encodedList.push(e);
                    totalSize += lib.getSize(e);
                }
            }
        }
    });

    // 2) Measure DECODING
    let decodeTime = measureTime(() => {
        for (let i = 0; i < bench.iterations; i++) {
            // decode everything we stored during the first iteration
            for (let e of encodedList) {
                // `decoded` is intentionally unused: we only measure speed
                // here, not round-trip correctness.
                let decoded = lib.decode(e);
                // not verifying correctness here, just measuring speed
            }
        }
    });

    return { encodeTime, decodeTime, totalSize };
}

////////////////////////////////////////////////////////////////////////////////
// 5. Main driver: run only the specified library and scenario
////////////////////////////////////////////////////////////////////////////////

// Find the requested library and scenario; on a miss, print the valid
// choices and stop so the caller can correct the command line.
var lib = libraries.find(l => l.name == lib_name);
var bench = benchmarks.find(b => b.name == scenario_name);

if (!lib) {
    log.console('Unknown library:', lib_name);
    log.console('Available libraries:', libraries.map(l => l.name).join(', '));
    $stop()
}
if (!bench) {
    log.console('Unknown scenario:', scenario_name);
    log.console('Available scenarios:', benchmarks.map(b => b.name).join(', '));
    $stop()
}

// Run the benchmark for this library/scenario combination
var { encodeTime, decodeTime, totalSize } = runBenchmarkForLibrary(lib, bench);

// Output json for easy parsing by hyperfine or other tools.
// NOTE(review): if a timed pass is faster than the clock resolution,
// encodeTime/decodeTime can be 0 and the ops-per-second figures become
// Infinity — acceptable for ad-hoc runs, but worth guarding if this output
// feeds automated tooling.
var totalOps = bench.iterations * bench.data.length;
var result = {
    lib: lib_name,
    scenario: scenario_name,
    encodeTime: encodeTime,
    decodeTime: decodeTime,
    totalSize: totalSize,
    totalOps: totalOps,
    encodeOpsPerSec: totalOps / encodeTime,
    decodeOpsPerSec: totalOps / decodeTime,
    encodeNsPerOp: (encodeTime / totalOps) * 1e9,
    decodeNsPerOp: (decodeTime / totalOps) * 1e9
};
log.console(result);
$stop()