From 07595aad63f0a33c8f0ba183c21eb5f2f367b0c6 Mon Sep 17 00:00:00 2001 From: John Alanbrook Date: Fri, 1 Aug 2025 13:13:54 -0500 Subject: [PATCH] gpu backend uniform binding --- prosperon/draw2d.cm | 2 +- prosperon/prosperon.cm | 242 ++++++++++++++++++++++++++++++++--------- source/qjs_sdl_gpu.c | 21 ++-- source/quickjs.h | 16 --- 4 files changed, 201 insertions(+), 80 deletions(-) diff --git a/prosperon/draw2d.cm b/prosperon/draw2d.cm index 6cdcdf9c..44f34dd8 100644 --- a/prosperon/draw2d.cm +++ b/prosperon/draw2d.cm @@ -133,7 +133,7 @@ draw.slice9 = function slice9(image, rect = [0,0], slice = 0, info = slice9_info }) } -draw.image = function image(image, rect, scale = {x:1,y:1}, anchor, shear, info = {mode:"nearest"}, material = {color:{r:1,g:1,b:1,a:1}}) { +draw.image = function image(image, rect, scale = {x:1,y:1}, anchor, shear, info, material) { if (!rect) throw Error('Need rectangle to render image.') if (!image) throw Error('Need an image to render.') diff --git a/prosperon/prosperon.cm b/prosperon/prosperon.cm index 24151acb..553da3e0 100644 --- a/prosperon/prosperon.cm +++ b/prosperon/prosperon.cm @@ -193,32 +193,14 @@ shader_type = 'msl' var sampler_cache = {} function canonicalize_sampler(desc) { - if (desc == true) - return json.encode(default_sampler) - - var sampler_obj = {} - sampler_obj.__proto__ = default_sampler - - if (typeof desc == 'object') { - for (var key in desc) { - if (desc.hasOwnProperty(key)) { - sampler_obj[key] = desc[key] - } - } - } - - var keys = Object.keys(sampler_obj).sort() - var canonical = {} - for (var i = 0; i < keys.length; i++) - canonical[keys[i]] = sampler_obj[keys[i]] - - return json.encode(canonical) + return json.encode(desc) } function get_sampler(desc) { var key = canonicalize_sampler(desc) if (!sampler_cache[key]) { + var sampler_config = json.decode(key) sampler_cache[key] = new sdl_gpu.sampler(device, sampler_config) } @@ -296,7 +278,6 @@ function get_pipeline_for_material(mat = {}) { cfg.__proto__ = sprite_pipeline material_pipeline_cache[key] = load_pipeline(cfg) - log.console(`created pipeline for ${json.encode(cfg)}`) } return material_pipeline_cache[key]; @@ -304,9 +285,95 @@ function get_pipeline_for_material(mat = {}) { function load_pipeline(config) { - config.vertex = make_shader(config.vertex)[GPU] - config.fragment = make_shader(config.fragment)[GPU] - return new sdl_gpu.graphics_pipeline(device, config) + // pull back the JS shader objects (they have `.reflection`) + def vertShader = make_shader(config.vertex); + def fragShader = make_shader(config.fragment); + + // build the GPU pipeline + def gpuPipeline = new sdl_gpu.graphics_pipeline(device, { + vertex: vertShader[GPU], + fragment: fragShader[GPU], + // ...all the other config fields... 
+ primitive: config.primitive, + blend: config.blend, + cull: config.cull, + face: config.face, + depth: config.depth, + stencil: config.stencil, + alpha_to_coverage: config.alpha_to_coverage, + multisample: config.multisample, + label: config.label, + target: config.target, + vertex_buffer_descriptions: config.vertex_buffer_descriptions, + vertex_attributes: config.vertex_attributes + }); + + // stash the reflection in the JS wrapper for easy access later + gpuPipeline._reflection = { + vertex: vertShader.reflection, + fragment: fragShader.reflection + }; + + return gpuPipeline; +} + +// Helper function to pack JavaScript objects into binary blob for UBOs +function pack_ubo(obj, ubo_type, reflection) { + var type_def = reflection.types[ubo_type]; + if (!type_def) { + log.console(`Warning: No type definition found for ${ubo_type}`); + return geometry.array_blob([]); + } + + var result_blob = new blob(); + + // Process each member in the UBO structure + for (var member of type_def.members) { + var value = obj[member.name]; + + if (value == null) { + if (member.type == "vec4") { + result_blob.write_blob(geometry.array_blob([1, 1, 1, 1])); + } else if (member.type == "vec3") { + result_blob.write_blob(geometry.array_blob([1, 1, 1])); + } else if (member.type == "vec2") { + result_blob.write_blob(geometry.array_blob([1, 1])); + } else if (member.type == "float") { + result_blob.write_blob(geometry.array_blob([1])); + } + continue; + } + + // Convert value to appropriate format based on type + if (member.type == "vec4") { + if (Array.isArray(value)) { + result_blob.write_blob(geometry.array_blob(value)); + } else if (typeof value == "object" && value.r != null) { + // Color object + result_blob.write_blob(geometry.array_blob([value.r, value.g, value.b, value.a || 1])); + } else { + // Single value, expand to vec4 + result_blob.write_blob(geometry.array_blob([value, value, value, value])); + } + } else if (member.type == "vec3") { + if (Array.isArray(value)) { + result_blob.write_blob(geometry.array_blob(value)); + } else if (typeof value == 'object' && value.r != null) + result_blob.write_blob(geometry.array_blob([value.r, value.g, value.b])); + else + result_blob.write_blob(geometry.array_blob([value, value, value])); + } else if (member.type == "vec2") { + if (Array.isArray(value)) { + result_blob.write_blob(geometry.array_blob(value)); + } else { + result_blob.write_blob(geometry.array_blob([value, value])); + } + } else if (member.type == "float") { + result_blob.write_blob(geometry.array_blob([value])); + } + } + + return stone(result_blob) } // Initialize ImGui with the window and renderer @@ -513,10 +580,7 @@ cmd_fns.camera = function(cmd) { if (cmd.camera.surface && !cmd.camera.surface[GPU]) { cmd.camera.surface[GPU] = new sdl_gpu.texture(device, cmd.camera.surface) - // Store the sampler description on the texture for later use - if (cmd.camera.surface.sampler != null) { - cmd.camera.surface[GPU].sampler_desc = cmd.camera.surface.sampler - } + // Don't store sampler on texture - samplers belong to materials } draw_queue.push(cmd) } @@ -536,15 +600,10 @@ function get_img_gpu(surface) samples: 0, type: "2d", format: "rgba8", - sampler: surface.sampler != null ? surface.sampler : true, + sampler: surface.sampler != null ? 
surface.sampler : default_sampler, color_target: true }) - // Store the sampler description on the texture for later use - if (surface.sampler != null) { - gpu.sampler_desc = surface.sampler - } - var tbuf = new sdl_gpu.transfer_buffer(device, { size: surface.pixels.length/8, usage: "upload" @@ -582,7 +641,7 @@ var draw_queue = [] var index_count = 0 var vertex_count = 0 -function render_geom(geom, img, pipeline = get_pipeline_for_material(null)) +function render_geom(geom, img, pipeline = get_pipeline_for_material(null), material = null) { if (!img[GPU]) { if (img.surface) @@ -601,6 +660,7 @@ function render_geom(geom, img, pipeline = get_pipeline_for_material(null)) draw_queue.push({ pipeline, texture: img[GPU], + material: material, num_indices: geom.num_indices, first_index: index_count, vertex_offset: vertex_count @@ -625,8 +685,12 @@ cmd_fns.draw_image = function(cmd) geom.indices = geometry.make_quad_indices(1) geom.num_indices = 6 + // Ensure material has diffuse property for dynamic binding + if (!cmd.material) cmd.material = {} + if (!cmd.material.diffuse) cmd.material.diffuse = img + var pipeline = get_pipeline_for_material(cmd.material) - render_geom(geom, img, pipeline) + render_geom(geom, img, pipeline, cmd.material) } cmd_fns.draw_text = function(cmd) @@ -644,36 +708,47 @@ cmd_fns.draw_text = function(cmd) font ) + // Ensure material has diffuse property for dynamic binding + if (!cmd.material) cmd.material = {} + if (!cmd.material.diffuse) cmd.material.diffuse = font + var pipeline = get_pipeline_for_material(cmd.material) - render_geom(mesh, font, pipeline) + render_geom(mesh, font, pipeline, cmd.material) } cmd_fns.tilemap = function(cmd) { var geometryCommands = cmd.tilemap.draw() - var pipeline = get_pipeline_for_material(cmd.material) - for (var geomCmd of geometryCommands) { var img = graphics.texture(geomCmd.image) if (!img) continue - render_geom(geomCmd.geometry, img, pipeline) + // Create a new material for each tile image with diffuse property + var tileMaterial = Object.assign({}, cmd.material || {}) + tileMaterial.diffuse = img + + var pipeline = get_pipeline_for_material(tileMaterial) + render_geom(geomCmd.geometry, img, pipeline, tileMaterial) } } cmd_fns.geometry = function(cmd) { - var pipeline = get_pipeline_for_material(cmd.material) - + var img if (typeof cmd.image == 'object') { - render_geom(cmd.geometry, cmd.image, pipeline) - return + img = cmd.image + } else { + img = graphics.texture(cmd.image) + if (!img) return } - var img = graphics.texture(cmd.image) - if (!img) return - render_geom(cmd.geometry, img, pipeline) + // Ensure material has diffuse property for dynamic binding + if (!cmd.material) cmd.material = {} + if (!cmd.material.diffuse) cmd.material.diffuse = img + + var pipeline = get_pipeline_for_material(cmd.material) + render_geom(cmd.geometry, img, pipeline, cmd.material) } cmd_fns.draw_slice9 = function(cmd) @@ -704,8 +779,12 @@ cmd_fns.draw_slice9 = function(cmd) var mesh = geometry.slice9(img, cmd.rect, slice_lrtb, slice_info) + // Ensure material has diffuse property for dynamic binding + if (!cmd.material) cmd.material = {} + if (!cmd.material.diffuse) cmd.material.diffuse = img + var pipeline = get_pipeline_for_material(cmd.material) - render_geom(mesh, img, pipeline) + render_geom(mesh, img, pipeline, cmd.material) } cmd_fns.draw_rect = function(cmd) @@ -720,7 +799,7 @@ cmd_fns.draw_rect = function(cmd) white_pixel[GPU] = get_img_gpu(white_pixel) var pipeline = get_pipeline_for_material(cmd.material) - render_geom(geom, {[GPU]: 
white_pixel[GPU]}, pipeline) + render_geom(geom, {[GPU]: white_pixel[GPU]}, pipeline, cmd.material) } var copy_pass @@ -829,6 +908,56 @@ prosperon.create_batch = function create_batch(draw_cmds, done) { current_camera_blob = null } + // Dynamic material binding - bind uniforms and textures from material + if (cmd.material && cmd.pipeline._reflection) { + def refl = cmd.pipeline._reflection; + + // Bind UBOs (uniform buffer objects) + if (refl.fragment && refl.fragment.ubos) { + for (def ubo of refl.fragment.ubos) { + def name = ubo.name; + def ubo_type = ubo.type; + + // For PSConstants or other UBOs, pack the material properties according to the UBO structure + def packed_blob = pack_ubo(cmd.material, ubo_type, refl.fragment); + + if (packed_blob && packed_blob.length > 0) { + // Push uniform data to both vertex and fragment stages +// render_queue.push_vertex_uniform_data(ubo.binding, packed_blob); + render_queue.push_fragment_uniform_data(ubo.binding, packed_blob); + } + } + } + + // Bind textures for any separate_images + if (refl.fragment && refl.fragment.separate_images) { + for (def imgDesc of refl.fragment.separate_images) { + def name = imgDesc.name; + def binding = imgDesc.binding; + def img = cmd.material[name]; + if (img) { + // Ensure texture is on GPU + if (!img[GPU]) { + if (img.surface) { + img[GPU] = get_img_gpu(img.surface); + } else if (img.cpu) { + img[GPU] = get_img_gpu(img.cpu); + } + } + + if (img[GPU]) { + // Use material's sampler or default_sampler + def sampler_desc = cmd.material.sampler || default_sampler; + render_pass.bind_samplers(false, binding, [{ + texture: img[GPU], + sampler: get_sampler(sampler_desc) + }]); + } + } + } + } + } + // Only bind buffers if not already bound or pipeline changed if (!buffers_bound) { render_pass.bind_buffers(0, [ @@ -850,12 +979,17 @@ prosperon.create_batch = function create_batch(draw_cmds, done) { current_camera_blob = cur_cam } - // Use texture's sampler if it has one, otherwise use standard sampler - var sampler_to_use = std_sampler - if (cmd.texture && cmd.texture.sampler_desc) { - sampler_to_use = get_sampler(cmd.texture.sampler_desc) + // Bind default texture if material didn't already bind "diffuse" + // Always bind the diffuse texture with material's sampler + if (cmd.texture) { + // Use material's sampler if specified, otherwise use default_sampler + var sampler_desc = (cmd.material && cmd.material.sampler) + ? 
cmd.material.sampler + : default_sampler + + var sampler_obj = get_sampler(sampler_desc) + render_pass.bind_samplers(false, 0, [{texture: cmd.texture, sampler: sampler_obj}]) } - render_pass.bind_samplers(false, 0, [{texture:cmd.texture, sampler: sampler_to_use}]) render_pass.draw_indexed( cmd.num_indices, diff --git a/source/qjs_sdl_gpu.c b/source/qjs_sdl_gpu.c index 08c71bd9..325933f8 100644 --- a/source/qjs_sdl_gpu.c +++ b/source/qjs_sdl_gpu.c @@ -13,11 +13,13 @@ #define QJSCLASSGPUWRAPPER(WRAPPERTYPE, SDLTYPE) \ typedef struct { \ SDL_GPUDevice *device; \ + JSValue js_device; \ SDL_##SDLTYPE *type; \ } WRAPPERTYPE; \ JSClassID js_SDL_##SDLTYPE##_id; \ static void js_SDL_##SDLTYPE##_finalizer(JSRuntime *rt, JSValue val) { \ WRAPPERTYPE *wrapper = JS_GetOpaque(val, js_SDL_##SDLTYPE##_id); \ + JS_FreeValueRT(rt, wrapper->js_device); \ if (wrapper && wrapper->device && wrapper->type) \ SDL_Release##SDLTYPE(wrapper->device, wrapper->type); \ free(wrapper); \ @@ -31,9 +33,10 @@ SDL_##SDLTYPE *js2SDL_##SDLTYPE(JSContext *js, JSValue val) { \ WRAPPERTYPE *wrapper = JS_GetOpaque(val, js_SDL_##SDLTYPE##_id); \ return wrapper ? wrapper->type : NULL; \ } \ -JSValue SDL_##SDLTYPE##2js(JSContext *js, SDL_GPUDevice *device, SDL_##SDLTYPE *member) { \ +JSValue SDL_##SDLTYPE##2js(JSContext *js, JSValue device, SDL_##SDLTYPE *member) { \ WRAPPERTYPE *wrapper = malloc(sizeof(WRAPPERTYPE)); \ - wrapper->device = device; \ + wrapper->js_device = JS_DupValue(js,device); \ + wrapper->device = js2SDL_GPUDevice(js, device); \ wrapper->type = member; \ JSValue j = JS_NewObjectClass(js, js_SDL_##SDLTYPE##_id); \ JS_SetOpaque(j, wrapper); \ @@ -719,7 +722,7 @@ static JSValue js_gpu_graphics_pipeline_constructor(JSContext *js, JSValueConst SDL_GPUGraphicsPipeline *pipeline = SDL_CreateGPUGraphicsPipeline(gpu, &info); if (!pipeline) return JS_ThrowInternalError(js, "Failed to create GPU pipeline: %s", SDL_GetError()); - return SDL_GPUGraphicsPipeline2js(js, gpu, pipeline); + return SDL_GPUGraphicsPipeline2js(js, argv[0], pipeline); } // Standalone sampler constructor: new sdl_gpu.sampler(device, config) @@ -750,7 +753,7 @@ static JSValue js_gpu_sampler_constructor(JSContext *js, JSValueConst self, int SDL_GPUSampler *sdl_sampler = SDL_CreateGPUSampler(gpu, &info); if (!sdl_sampler) return JS_ThrowInternalError(js, "Failed to create GPU sampler: %s", SDL_GetError()); - return SDL_GPUSampler2js(js, gpu, sdl_sampler); + return SDL_GPUSampler2js(js, argv[0], sdl_sampler); } JSC_CCALL(gpu_driver, @@ -807,7 +810,7 @@ static JSValue js_gpu_shader_constructor(JSContext *js, JSValueConst self, int a if (!shader) return JS_ThrowReferenceError(js, "Unable to create shader: %s", SDL_GetError()); - return SDL_GPUShader2js(js, gpu, shader); + return SDL_GPUShader2js(js, argv[0], shader); } JSC_CCALL(gpu_acquire_cmd_buffer, @@ -879,7 +882,7 @@ static JSValue js_gpu_compute_pipeline_constructor(JSContext *js, JSValueConst s SDL_GPUComputePipeline *pipeline = SDL_CreateGPUComputePipeline(gpu, &info); JS_FreeCString(js,info.entrypoint); if (!pipeline) return JS_ThrowReferenceError(js,"Could not create compute pipeline: %s", SDL_GetError()); - return SDL_GPUComputePipeline2js(js, gpu, pipeline); + return SDL_GPUComputePipeline2js(js, argv[0], pipeline); } // Standalone buffer constructor: new sdl_gpu.buffer(device, config) @@ -915,7 +918,7 @@ static JSValue js_gpu_buffer_constructor(JSContext *js, JSValueConst self, int a SDL_GPUBuffer *buffer = SDL_CreateGPUBuffer(gpu, &info); if (!buffer) return JS_ThrowReferenceError(js, "Unable 
to create buffer: %s", SDL_GetError()); - return SDL_GPUBuffer2js(js, gpu, buffer); + return SDL_GPUBuffer2js(js, argv[0], buffer); } static JSValue js_gpu_transfer_buffer_constructor(JSContext *js, JSValueConst self, int argc, JSValueConst *argv) { @@ -936,7 +939,7 @@ static JSValue js_gpu_transfer_buffer_constructor(JSContext *js, JSValueConst se SDL_GPUTransferBuffer *buffer = SDL_CreateGPUTransferBuffer(gpu, &info); if (!buffer) return JS_ThrowReferenceError(js, "Unable to create transfer buffer: %s", SDL_GetError()); - return SDL_GPUTransferBuffer2js(js, gpu, buffer); + return SDL_GPUTransferBuffer2js(js, argv[0], buffer); } // Standalone texture constructor: new sdl_gpu.texture(device, config) @@ -974,7 +977,7 @@ static JSValue js_gpu_texture_constructor(JSContext *js, JSValueConst self, int SDL_GPUTexture *tex = SDL_CreateGPUTexture(gpu, &info); if (!tex) return JS_ThrowReferenceError(js, "Unable to create texture: %s", SDL_GetError()); - JSValue jstex = SDL_GPUTexture2js(js, gpu, tex); + JSValue jstex = SDL_GPUTexture2js(js, argv[0], tex); JS_SetPropertyStr(js, jstex, "width", number2js(js, info.width)); JS_SetPropertyStr(js, jstex, "height", number2js(js, info.height)); JS_SetPropertyStr(js, jstex, "dim", vec22js(js, (HMM_Vec2){info.width, info.height})); diff --git a/source/quickjs.h b/source/quickjs.h index 0ff291b0..6bba1f31 100644 --- a/source/quickjs.h +++ b/source/quickjs.h @@ -789,22 +789,6 @@ JSValue JS_ParseJSON2(JSContext *ctx, const char *buf, size_t buf_len, JSValue JS_JSONStringify(JSContext *ctx, JSValueConst obj, JSValueConst replacer, JSValueConst space0); -typedef enum JSPromiseStateEnum { - JS_PROMISE_PENDING, - JS_PROMISE_FULFILLED, - JS_PROMISE_REJECTED, -} JSPromiseStateEnum; - -JSValue JS_NewPromiseCapability(JSContext *ctx, JSValue *resolving_funcs); -JSPromiseStateEnum JS_PromiseState(JSContext *ctx, JSValue promise); -JSValue JS_PromiseResult(JSContext *ctx, JSValue promise); - -/* is_handled = TRUE means that the rejection is handled */ -typedef void JSHostPromiseRejectionTracker(JSContext *ctx, JSValueConst promise, - JSValueConst reason, - JS_BOOL is_handled, void *opaque); -void JS_SetHostPromiseRejectionTracker(JSRuntime *rt, JSHostPromiseRejectionTracker *cb, void *opaque); - /* return != 0 if the JS code needs to be interrupted */ typedef int JSInterruptHandler(JSRuntime *rt, void *opaque); void JS_SetInterruptHandler(JSRuntime *rt, JSInterruptHandler *cb, void *opaque);
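
Usage sketch for the new reflection-driven UBO packing in prosperon.cm: the reflection layout below (ubos / types / member lists, and the PSConstants name) is assumed from how create_batch and pack_ubo read it in this patch, not from actual generated shader output.

    // Hypothetical fragment-stage reflection, in the shape make_shader() is
    // assumed to attach as shader.reflection (illustrative only).
    var frag_reflection = {
      ubos: [{ name: "PSConstants", type: "PSConstants_t", binding: 0 }],
      types: {
        PSConstants_t: { members: [
          { name: "color", type: "vec4" },
          { name: "uv_scale", type: "vec2" }
        ]}
      }
    }

    // A material carrying a color object; pack_ubo expands {r,g,b} to a vec4
    // (alpha defaulting to 1) and fills the missing uv_scale member with ones.
    var material = { color: { r: 1, g: 0.5, b: 0.25 } }

    var packed = pack_ubo(material, "PSConstants_t", frag_reflection)
    // packed is the stoned blob of 6 floats [1, 0.5, 0.25, 1, 1, 1], suitable for
    // render_queue.push_fragment_uniform_data(0, packed)

Textures listed under refl.fragment.separate_images are looked up on the material by the same name, which is why the draw commands above now default cmd.material.diffuse to the image being drawn before selecting a pipeline.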