diff --git a/scripts/core/_sdl_video.js b/scripts/core/_sdl_video.js
index 1b061be4..09cfaee4 100644
--- a/scripts/core/_sdl_video.js
+++ b/scripts/core/_sdl_video.js
@@ -390,7 +390,7 @@ function handle_renderer(msg) {
     case 'debugText':
       if (!msg.data || !msg.data.text) return {error: "Missing text"};
-      ren.debugText(msg.data.pos || {x:0, y:0}, msg.data.text);
+      ren.debugText([msg.data.pos.x, msg.data.pos.y], msg.data.text);
       return {success: true};
 
     case 'clipEnabled':
diff --git a/scripts/core/io.js b/scripts/core/io.js
deleted file mode 100644
index 57d48bc4..00000000
--- a/scripts/core/io.js
+++ /dev/null
@@ -1,19 +0,0 @@
-$_.unneeded(_ => {
-}, Infinity)
-
-var subscribers = []
-
-var os = use('os')
-
-$_.receiver(e => {
-  console.log(json.encode(e))
-  return
-  if (e.op === "subscribe") {
-    if (!e.actor) throw Error('Got a subscribe message with no actor.');
-    subscribers.push(e.actor)
-    return;
-  }
-
-  for (var a of subscribers)
-    send(a, e);
-});
diff --git a/scripts/modules/draw2d.js b/scripts/modules/draw2d.js
index 55fd4378..0beab38b 100644
--- a/scripts/modules/draw2d.js
+++ b/scripts/modules/draw2d.js
@@ -184,13 +184,11 @@ draw.slice9 = function slice9(image, rect = [0,0], slice = 0, info = slice9_info
   })
 }
 
-draw.image = function image(image, rect = [0,0], rotation = 0, anchor = [0,0], shear = [0,0], info = {}, material) {
+draw.image = function image(image, rect, rotation = 0, anchor = [0,0], shear = [0,0], info = {}, material) {
+  if (!rect) throw Error('Need rectangle to render image.')
   if (!image) throw Error('Need an image to render.')
-  // Ensure rect has proper structure
-  if (Array.isArray(rect)) {
-    rect = {x: rect[0], y: rect[1], width: 100, height: 100} // Default size
-  }
+  if (!('x' in rect && 'y' in rect)) throw Error('Must provide X and Y for image.')
 
   info = Object.assign({}, image_info, info);
 
@@ -209,13 +207,13 @@ draw.circle = function render_circle(pos, radius, def, material) {
   draw.ellipse(pos, [radius,radius], def, material)
 }
 
-draw.text = function text(text, rect, font = 'fonts/c64.ttf', size = 8, color = color.white, wrap = 0) {
+draw.text = function text(text, pos, font = 'fonts/c64.ttf', size = 8, color = color.white, wrap = 0) {
   add_command("draw_text", {
-    text: text,
-    rect: rect,
-    font: font,
-    size: size,
-    wrap: wrap,
+    text,
+    pos,
+    font,
+    size,
+    wrap,
     material: {color}
   })
 }
diff --git a/tests/moth.js b/scripts/moth.js
similarity index 70%
rename from tests/moth.js
rename to scripts/moth.js
index a9b6502f..049a1b5d 100644
--- a/tests/moth.js
+++ b/scripts/moth.js
@@ -9,6 +9,58 @@ var transform = use('transform');
 var rasterize = use('rasterize');
 var video_actor = use('sdl_video')
+var geometry = use('geometry')
+
+function worldToScreenRect({x,y,width,height}, camera, winW, winH) {
+  var bl = worldToScreenPoint([x,y], camera, winW, winH)
+  var tr = worldToScreenPoint([x+width, y+height], camera, winW, winH)
+
+  return {
+    x: Math.min(bl.x, tr.x),
+    y: Math.min(bl.y, tr.y),
+    width: Math.abs(tr.x - bl.x),
+    height: Math.abs(tr.y - bl.y)
+  }
+}
+
+function worldToScreenPoint([wx, wy], camera, winW, winH) {
+  // 1) world-window origin (bottom-left)
+  const worldX0 = camera.pos[0] - camera.size[0] * camera.anchor[0];
+  const worldY0 = camera.pos[1] - camera.size[1] * camera.anchor[1];
+
+  // 2) normalized device coords [0..1]
+  const ndcX = (wx - worldX0) / camera.size[0];
+  const ndcY = (wy - worldY0) / camera.size[1];
+
+  // 3) map into pixel-space via the fractional viewport
+  const px = camera.viewport.x * winW
+           + ndcX * (camera.viewport.width * winW);
+  const py = camera.viewport.y * winH
+           + (1 - ndcY) * (camera.viewport.height * winH);
+
+  return [ px, py ];
+}
+
+var camera = {
+  size: [500,500],//{width:500,height:500}, // pixel size the camera "sees", like its resolution
+  pos: [250,250],//{x:0,y:0}, // where it is
+  fov:50,
+  near_z:0,
+  far_z:1000,
+  viewport: {x:0,y:0,width:1,height:1}, // viewport it appears on screen
+  ortho:true,
+  anchor:[0.5,0.5],//{x:0.5,y:0.5},
+  surface: undefined
+}
+
+var util = use('util')
+var cammy = util.camera_globals(camera)
+console.log(cammy)
+
+var sq = {x:50,y:30,height:300,width:100}
+console.log(json.encode(sq))
+console.log(json.encode(worldToScreenRect(sq, camera, 500, 500)))
+
 var graphics
 var window
 
@@ -24,7 +76,12 @@ $_.start(e => {
 
   send(video_actor, {
     kind: "window",
-    op:"create"
+    op:"create",
+    data: {
+      title: "Moth Test",
+      width: 500,
+      height: 500
+    }
   }, e => {
     if (e.error) {
      console.error(e.error)
@@ -56,14 +113,6 @@ var fps_samples = []
 var fps_sample_count = 60
 var fps_sum = 0
 
-// Engine state
-var camera = {
-  x: 0,
-  y: 0,
-  scale: 1,
-  rotation: 0
-}
-
 var images = {}
 
 // Convert high-level draw commands to low-level renderer commands
@@ -82,6 +131,7 @@ function translate_draw_commands(commands) {
 
     switch(cmd.cmd) {
       case "draw_rect":
+        cmd.rect = worldToScreenRect(cmd.rect, camera,500, 500)
         // Handle rectangles with optional rounding and thickness
         if (cmd.opt && cmd.opt.radius && cmd.opt.radius > 0) {
           // Rounded rectangle
@@ -94,7 +144,6 @@
               data: {rect: raster_result.data}
             })
           } else if (raster_result.type === 'rects') {
-            // SDL video expects 'rects' operation, not 'fillRects'
            raster_result.data.forEach(function(rect) {
              renderer_commands.push({
                op: "fillRect",
@@ -112,14 +161,12 @@
               data: {rect: raster_result.data}
             })
           } else if (raster_result.type === 'rects') {
-            // SDL video expects 'rects' operation with array
            renderer_commands.push({
              op: "rects",
              data: {rects: raster_result.data}
            })
          }
        } else {
-          // Filled rectangle
          renderer_commands.push({
            op: "fillRect",
            data: {rect: cmd.rect}
@@ -129,7 +176,7 @@
 
       case "draw_circle":
       case "draw_ellipse":
-        break
+        cmd.pos = worldToScreenPoint(cmd.pos, camera, 500, 500)
         // Rasterize ellipse to points or rects
         var radii = cmd.radii || [cmd.radius, cmd.radius]
         var raster_result = rasterize.ellipse(cmd.pos, radii, cmd.opt || {})
@@ -151,11 +198,12 @@
       case "draw_line":
         renderer_commands.push({
           op: "line",
-          data: {points: cmd.points}
+          data: {points: cmd.points.map(p => worldToScreenPoint(p, camera, 500, 500))}
         })
         break
 
       case "draw_point":
+        cmd.pos = worldToScreenPoint(cmd.pos, camera, 500, 500)
         renderer_commands.push({
           op: "point",
           data: {points: [cmd.pos]}
@@ -165,24 +213,31 @@
       case "draw_image":
         var img = graphics.texture(cmd.image)
         if (!img.gpu) break
-        // TODO: Handle image loading and texture management
+
+        cmd.rect.width ??= img.width
+        cmd.rect.height ??= img.height
+        cmd.rect = worldToScreenRect(cmd.rect, camera, 500, 500)
+
         renderer_commands.push({
           op: "texture",
           data: {
             texture_id: img.gpu.id,
-            dst: {x: cmd.rect.x, y: cmd.rect.y, width: img.width, height:img.height},
+            dst: cmd.rect,
             src: {x:0,y:0,width:img.width,height:img.height},
           }
         })
         break
 
       case "draw_text":
-        // Use debugText for now
+        if (!cmd.text) break
+        if (!cmd.pos) break
+        var rect = worldToScreenRect({x:cmd.pos.x, y:cmd.pos.y, width:8, height:8}, camera, 500,500)
+        var pos = {x: rect.x, y: rect.y}
         renderer_commands.push({
           op: "debugText",
           data: {
-            pos: cmd.pos || {x: 0, y: 0},
-            text: cmd.text || ""
+            pos,
+            text: cmd.text
           }
         })
         break
@@ -199,6 +254,8 @@ function loop()
   var dt = now - last
   last = now
 
+//  camera.pos = camera.pos.map(x => x += 5*dt)
+
   // Update the game
   send(gameactor, {kind:'update', dt:dt}, e => {
     // Get draw commands from game
diff --git a/source/jsffi.c b/source/jsffi.c
index 06fd58f8..ae4677f7 100644
--- a/source/jsffi.c
+++ b/source/jsffi.c
@@ -708,69 +708,75 @@ JSC_CCALL(os_make_text_buffer,
   arrfree(buffer);
 )
 
-shader_globals camera_globals(JSContext *js, JSValue camera)
+JSValue js_util_camera_globals(JSContext *js, JSValue self, int argc, JSValue *argv)
 {
-  shader_globals data = {0};
-  if (JS_IsUndefined(camera))
-    return data;
-
-  HMM_Vec2 size;
-  transform *transform;
-  double fov = 0;
-  int ortho;
-  double near_z = 0;
-  double far_z = 0;
-  HMM_Vec2 anchor;
-
-  JS_GETPROP(js, size, camera, size, vec2)
-  JS_GETPROP(js, transform, camera, transform, transform)
-  JS_GETPROP(js, fov, camera, fov, number)
-  JS_GETPROP(js, ortho, camera,ortho,bool)
-  JS_GETPROP(js,near_z,camera,near_z,number)
-  JS_GETPROP(js,far_z,camera,far_z,number)
-  JS_GETPROP(js, anchor, camera, anchor, vec2)
+  JSValue camera = argv[0];
+  if(JS_IsUndefined(camera)) return JS_UNDEFINED;
 
-  HMM_Mat4 proj;
-  HMM_Mat4 view;
+  HMM_Vec2 size; HMM_Vec3 pos; HMM_Quat rotation;
+  double fov = 0; int ortho; double near_z = 0; double far_z = 0;
+  HMM_Vec2 anchor;
+
+  JS_GETPROP(js, size, camera, size, vec2)
+  JS_GETPROP(js, fov, camera, fov, number)
+  JS_GETPROP(js, ortho, camera, ortho, bool)
+  JS_GETPROP(js, near_z, camera, near_z, number)
+  JS_GETPROP(js, far_z, camera, far_z, number)
+  JS_GETPROP(js, anchor, camera, anchor, vec2)
+  JS_GETPROP(js, pos, camera, pos, vec3)
+  JS_GETPROP(js, rotation, camera, rotation, quat)
 
-  if (ortho) {
+  rotation.w = 1;
+
+  HMM_Mat4 proj, view;
+
+  if(ortho) {
     float left = -anchor.x * size.x;
     float bottom = -anchor.y * size.y;
-    float right = left + size.x;
+    float right  = left + size.x;
     float top = bottom + size.y;
-
-    proj = HMM_Orthographic_RH_NO(
-      left, right,
-      bottom, top,
-      -1.0f, 1.0f
-    );
-  }
-  else {
-    proj = HMM_Perspective_RH_NO(fov, size.x/size.y,near_z,far_z);
+    proj = HMM_Orthographic_RH_NO(left, right, bottom, top, -1.0f, 1.0f);
+  } else {
+    proj = HMM_Perspective_RH_NO(fov, size.x/size.y, near_z, far_z);
     proj.Columns[1] = HMM_MulV4F(proj.Columns[1], -1.0f);
   }
 
   view = HMM_MulM4(
-    HMM_InvTranslate(HMM_Translate(transform->pos)),
-    HMM_InvRotate(HMM_QToM4(transform->rotation))
+    HMM_InvTranslate(HMM_Translate(pos)),
+    HMM_InvRotate   (HMM_QToM4(rotation))
   );
 
-  // Update your shader globals
-  data.world_to_projection = HMM_MulM4(proj, view);
-  data.projection_to_world = HMM_InvGeneralM4(data.world_to_projection);
-  data.camera_pos_world = transform->pos;
-  data.viewport_min_z = near_z;
-  data.viewport_max_z = far_z;
-  data.render_size = size;
-  data.world_to_view = view;
-  data.view_to_projection = proj;
-  data.camera_dir_world = HMM_NormV3(HMM_QVRot((HMM_Vec3){0,0,-1},transform->rotation));
-  data.viewport_size = (HMM_Vec2){0.5,0.5};
-  data.viewport_offset = (HMM_Vec2){0,0};
-  data.time = SDL_GetTicksNS() / 1000000000.0f;
+  JSValue data = JS_NewObject(js);
+
+  HMM_Mat4 world_to_projection = HMM_MulM4(proj, view);
+  HMM_Mat4 projection_to_world = HMM_InvGeneralM4(world_to_projection);
+  HMM_Vec3 camera_dir_world = HMM_NormV3(
+    HMM_QVRot((HMM_Vec3){0,0,-1}, rotation)
+  );
+
+  JS_SetPropertyStr(js, data, "world_to_projection",
+    JS_NewArrayBufferCopy(js, world_to_projection.em,
+      sizeof(float)*16));
+  JS_SetPropertyStr(js, data, "projection_to_world",
+    JS_NewArrayBufferCopy(js, projection_to_world.em,
+      sizeof(float)*16));
+  JS_SetPropertyStr(js, data, "world_to_view",
+    JS_NewArrayBufferCopy(js, view.em, sizeof(float)*16));
+  JS_SetPropertyStr(js, data, "view_to_projection",
+    JS_NewArrayBufferCopy(js, proj.em, sizeof(float)*16));
+
+  JS_SetPropertyStr(js, data, "camera_pos_world", vec32js(js, pos));
+  JS_SetPropertyStr(js, data, "camera_dir_world", vec32js(js, camera_dir_world));
+  JS_SetPropertyStr(js, data, "render_size", vec22js(js, size));
+  JS_SetPropertyStr(js, data, "viewport_size", vec22js(js, (HMM_Vec2){0.5,0.5}));
+  JS_SetPropertyStr(js, data, "viewport_offset", vec22js(js, (HMM_Vec2){0,0}));
+  JS_SetPropertyStr(js, data, "viewport_min_z", number2js(js, near_z));
+  JS_SetPropertyStr(js, data, "viewport_max_z", number2js(js, far_z));
+
   return data;
 }
+
 static JSValue floats2array(JSContext *js, float *vals, size_t len)
 {
   JSValue arr = JS_NewArray(js);
   for (size_t i = 0; i < len; i++) {
@@ -1358,7 +1364,7 @@ JSC_CCALL(os_cull_sprites,
   int n = 0;
   JSValue sprites = argv[0];
 
-  shader_globals info = camera_globals(js,argv[1]);
+  shader_globals info = {0}; // TODO: get this as a JS object
   rect camera_rect = {0};
   camera_rect.x = info.camera_pos_world.x - info.render_size.x/2.0;
   camera_rect.y = info.camera_pos_world.y - info.render_size.y/2.0;
@@ -1384,6 +1390,7 @@
 static const JSCFunctionListEntry js_util_funcs[] = {
   MIST_FUNC_DEF(os, guid, 0),
   MIST_FUNC_DEF(os, insertion_sort, 2),
+  MIST_FUNC_DEF(util, camera_globals, 1),
 };
 
 JSC_CCALL(graphics_hsl_to_rgb,
@@ -1569,8 +1576,8 @@ void ffi_load(JSContext *js)
   arrput(rt->module_registry, MISTLINE(sprite));
   arrput(rt->module_registry, MISTLINE(transform));
-  arrput(rt->module_registry, MISTLINE(wota));
-  arrput(rt->module_registry, MISTLINE(nota));
+//  arrput(rt->module_registry, MISTLINE(wota));
+//  arrput(rt->module_registry, MISTLINE(nota));
 
 #ifndef NSTEAM
   arrput(rt->module_registry, MISTLINE(steam));
diff --git a/source/jsffi.h b/source/jsffi.h
index cede040a..2eb39cc5 100644
--- a/source/jsffi.h
+++ b/source/jsffi.h
@@ -96,7 +96,4 @@ JSValue quads_to_mesh(JSContext *js, text_vert *buffer);
 SDL_Window *js2SDL_Window(JSContext *js, JSValue v);
 JSValue SDL_Window2js(JSContext *js, SDL_Window *w);
 
-// Camera functions
-shader_globals camera_globals(JSContext *js, JSValue camera);
-
 #endif
diff --git a/source/qjs_geometry.c b/source/qjs_geometry.c
index 2e4a1475..5c867895 100644
--- a/source/qjs_geometry.c
+++ b/source/qjs_geometry.c
@@ -773,6 +773,43 @@ JSC_CCALL(gpu_make_sprite_queue,
   JS_FreeValue(js, mesh);
 )
 
+JSC_CCALL(geometry_rect_transform,
+  // argv[0] = world-space rect
+  rect r = js2rect(js, argv[0]);
+  // argv[1] = world_to_projection (16 floats)
+  size_t byte_len;
+  float *data12 = JS_GetArrayBuffer(js, &byte_len, argv[1]);
+  HMM_Mat4 wp; memcpy(wp.Elements, data12, sizeof(wp.Elements));
+
+  // make our two corners at z=0
+  HMM_Vec4 p0 = { r.x, r.y, 0.0f, 1.0f };
+  HMM_Vec4 p1 = { r.x + r.w, r.y + r.h, 0.0f, 1.0f };
+
+  // transform into clip space
+  HMM_Vec4 t0 = HMM_MulM4V4(wp, p0);
+  HMM_Vec4 t1 = HMM_MulM4V4(wp, p1);
+
+  // perspective divide → NDC
+  float ndc_x0 = t0.X / t0.W, ndc_y0 = t0.Y / t0.W;
+  float ndc_x1 = t1.X / t1.W, ndc_y1 = t1.Y / t1.W;
+
+  // NDC → UV
+  float u0 = ndc_x0 * 0.5f + 0.5f;
+  float v0 = ndc_y0 * 0.5f + 0.5f;
+  float u1 = ndc_x1 * 0.5f + 0.5f;
+  float v1 = ndc_y1 * 0.5f + 0.5f;
+
+  // (Optionally multiply by pixel-size here, or do that in JS)
+  rect newrect = {
+    .x = u0,
+    .y = v0,
+    .w = u1 - u0,
+    .h = v1 - v0
+  };
+
+  return rect2js(js, newrect);
+)
+
 static const JSCFunctionListEntry js_geometry_funcs[] = {
   MIST_FUNC_DEF(geometry, rect_intersection, 2),
   MIST_FUNC_DEF(geometry, rect_intersects, 2),
@@ -783,6 +820,7 @@ static const JSCFunctionListEntry js_geometry_funcs[] = {
   MIST_FUNC_DEF(geometry, rect_point_inside, 2),
   MIST_FUNC_DEF(geometry, rect_pos, 1),
   MIST_FUNC_DEF(geometry, rect_move, 2),
+  MIST_FUNC_DEF(geometry, rect_transform, 2),
   MIST_FUNC_DEF(gpu, tile, 4),
   MIST_FUNC_DEF(gpu, slice9, 3),
   MIST_FUNC_DEF(gpu, make_sprite_mesh, 2),
diff --git a/source/qjs_sdl_gpu.c b/source/qjs_sdl_gpu.c
index 369a3df1..714931f9 100644
--- a/source/qjs_sdl_gpu.c
+++ b/source/qjs_sdl_gpu.c
@@ -1987,9 +1987,9 @@ JSC_CCALL(cmd_hud,
 )
 
 JSC_CCALL(cmd_camera,
-  SDL_GPUCommandBuffer *cmds = js2SDL_GPUCommandBuffer(js, self);
-  shader_globals data = camera_globals(js, argv[0]);
-  SDL_PushGPUVertexUniformData(cmds, js2number(js,argv[1]), &data, sizeof(data));
+//  SDL_GPUCommandBuffer *cmds = js2SDL_GPUCommandBuffer(js, self);
+//  shader_globals data = camera_globals(js, argv[0]);
+//  SDL_PushGPUVertexUniformData(cmds, js2number(js,argv[1]), &data, sizeof(data));
 )
 
 JSC_SCALL(cmd_push_debug_group,
diff --git a/tests/prosperon.js b/tests/prosperon.js
index 4f1467cc..ae7dfc6f 100644
--- a/tests/prosperon.js
+++ b/tests/prosperon.js
@@ -50,7 +50,7 @@ function draw()
     {color: color.yellow}
   )
 
-  draw2d.image("tests/bunny")
+  draw2d.image("tests/bunny", {x:0,y:0})
 
   // Return the draw commands
   return draw2d.get_commands()
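
Usage note (outside the diff): the two native helpers added above are meant to compose, and could eventually replace the hand-rolled worldToScreenRect in scripts/moth.js. A minimal sketch, assuming geometry.rect_transform accepts the same {x, y, width, height} rects used on the JS side and returns a rect in [0..1] UV space with a bottom-left origin, matching worldToScreenPoint; the helper name worldRectToPixels is hypothetical:

// Hypothetical helper, not part of this diff: world-space rect -> window pixels
// via util.camera_globals + geometry.rect_transform.
var util = use('util')
var geometry = use('geometry')

function worldRectToPixels(rect, camera, winW, winH) {
  // camera_globals exposes world_to_projection as a 16-float ArrayBuffer (jsffi.c above)
  var globals = util.camera_globals(camera)
  // rect_transform maps the world rect into normalized [0..1] UV space (qjs_geometry.c above)
  var uv = geometry.rect_transform(rect, globals.world_to_projection)
  return {
    x: uv.x * winW,
    y: (1 - (uv.y + uv.height)) * winH, // UV y grows upward; window pixels grow downward
    width: uv.width * winW,
    height: uv.height * winH
  }
}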