improve
This commit is contained in:
@@ -1,7 +0,0 @@
Thank you for using Prosperon!

Provided are Prosperon builds for all available platforms. Simply run prosperon for your platform in a game folder to play!

To get started, take a dive into the provided example games in the examples folder. You can either copy the prosperon executable into an example directory and run it there, or run `prosperon path/to/example` from the project root.

You can look through the docs folder for the Prosperon manual to learn all about it. The manual is available on the web at [docs.prosperon.dev](https://docs.prosperon.dev).
 12645  audio/dr_flac.h   (file diff suppressed because it is too large)
  5374  audio/dr_mp3.h    (file diff suppressed because it is too large)
  9009  audio/dr_wav.h    (file diff suppressed because it is too large)
   173  audio/dsp.c
@@ -1,173 +0,0 @@
#include "cell.h"
#include <stdlib.h>
#include <string.h>
#include <math.h>

// dsp.mix_blobs(blobs, volumes)
// blobs: Array of stoned blobs (stereo f32 PCM, all same length)
// volumes: Array of floats (volume per blob)
// returns: stoned blob (mixed audio)
// All blobs must be the same byte length.
JSC_CCALL(dsp_mix_blobs,
  if (argc < 2) return JS_ThrowTypeError(js, "dsp.mix_blobs(blobs, volumes) requires 2 arguments");

  JSValue blobs_arr = argv[0];
  JSValue vols_arr = argv[1];
  if (!JS_IsArray(js, blobs_arr)) return JS_ThrowTypeError(js, "blobs must be an array");
  if (!JS_IsArray(js, vols_arr)) return JS_ThrowTypeError(js, "volumes must be an array");

  int len = 0;
  JSValue len_val = JS_GetPropertyStr(js, blobs_arr, "length");
  JS_ToInt32(js, &len, len_val);
  JS_FreeValue(js, len_val);

  if (len == 0) {
    // Return empty stoned blob
    return js_new_blob_stoned_copy(js, NULL, 0);
  }

  // Get first blob to determine output size
  JSValue first_blob = JS_GetPropertyUint32(js, blobs_arr, 0);
  size_t out_bytes;
  float *first_data = (float*)js_get_blob_data(js, &out_bytes, first_blob);
  JS_FreeValue(js, first_blob);
  if (first_data == (void*)-1) return JS_EXCEPTION;
  if (out_bytes == 0) return js_new_blob_stoned_copy(js, NULL, 0);

  size_t num_samples = out_bytes / sizeof(float);
  float *mix_buf = calloc(num_samples, sizeof(float));
  if (!mix_buf) return JS_ThrowOutOfMemory(js);

  for (int i = 0; i < len; i++) {
    JSValue blob_val = JS_GetPropertyUint32(js, blobs_arr, i);
    JSValue vol_val = JS_GetPropertyUint32(js, vols_arr, i);

    size_t blob_len;
    float *blob_data = (float*)js_get_blob_data(js, &blob_len, blob_val);
    JS_FreeValue(js, blob_val);
    if (blob_data == (void*)-1) {
      JS_FreeValue(js, vol_val);
      free(mix_buf);
      return JS_EXCEPTION;
    }

    double vol = 1.0;
    JS_ToFloat64(js, &vol, vol_val);
    JS_FreeValue(js, vol_val);

    // Mix samples (use min length to avoid overrun)
    size_t samples = blob_len / sizeof(float);
    if (samples > num_samples) samples = num_samples;
    for (size_t s = 0; s < samples; s++) {
      mix_buf[s] += blob_data[s] * (float)vol;
    }
  }

  JSValue result = js_new_blob_stoned_copy(js, mix_buf, out_bytes);
  free(mix_buf);
  return result;
)

// dsp.lpf(blob, options)
// blob: stoned blob (stereo f32 PCM)
// options: { cutoff: 0.0-1.0 (normalized frequency), channels: 2 }
// returns: stoned blob (filtered audio)
// Simple one-pole lowpass filter per channel
JSC_CCALL(dsp_lpf,
  if (argc < 2) return JS_ThrowTypeError(js, "dsp.lpf(blob, options) requires 2 arguments");

  size_t len;
  float *data = (float*)js_get_blob_data(js, &len, argv[0]);
  if (data == (void*)-1) return JS_EXCEPTION;
  if (len == 0) return js_new_blob_stoned_copy(js, NULL, 0);

  // Get options
  double cutoff = 0.5;
  int32_t channels = 2;
  JSValue cutoff_val = JS_GetPropertyStr(js, argv[1], "cutoff");
  JSValue channels_val = JS_GetPropertyStr(js, argv[1], "channels");
  if (!JS_IsNull(cutoff_val)) JS_ToFloat64(js, &cutoff, cutoff_val);
  if (!JS_IsNull(channels_val)) JS_ToInt32(js, &channels, channels_val);
  JS_FreeValue(js, cutoff_val);
  JS_FreeValue(js, channels_val);

  if (cutoff < 0.0) cutoff = 0.0;
  if (cutoff > 1.0) cutoff = 1.0;
  if (channels < 1) channels = 1;

  // Compute filter coefficient (simple one-pole: y[n] = alpha*x[n] + (1-alpha)*y[n-1])
  // alpha = cutoff (0 = no signal, 1 = no filtering)
  float alpha = (float)cutoff;

  size_t num_samples = len / sizeof(float);
  float *out = malloc(len);
  if (!out) return JS_ThrowOutOfMemory(js);

  // Allocate state per channel
  float *prev = calloc(channels, sizeof(float));
  if (!prev) { free(out); return JS_ThrowOutOfMemory(js); }

  for (size_t i = 0; i < num_samples; i++) {
    int ch = i % channels;
    float x = data[i];
    float y = alpha * x + (1.0f - alpha) * prev[ch];
    prev[ch] = y;
    out[i] = y;
  }

  free(prev);
  JSValue result = js_new_blob_stoned_copy(js, out, len);
  free(out);
  return result;
)

// dsp.silence(frames, channels)
// Returns a stoned blob of silence (zeroed f32 samples)
JSC_CCALL(dsp_silence,
  int32_t frames = 1024;
  int32_t channels = 2;
  if (argc >= 1) JS_ToInt32(js, &frames, argv[0]);
  if (argc >= 2) JS_ToInt32(js, &channels, argv[1]);
  if (frames < 0) frames = 0;
  if (channels < 1) channels = 1;

  size_t bytes = (size_t)frames * channels * sizeof(float);
  float *buf = calloc(frames * channels, sizeof(float));
  if (!buf) return JS_ThrowOutOfMemory(js);

  JSValue result = js_new_blob_stoned_copy(js, buf, bytes);
  free(buf);
  return result;
)

// dsp.mono_to_stereo(blob)
// Converts a mono f32 blob to stereo by duplicating samples
JSC_CCALL(dsp_mono_to_stereo,
  size_t len;
  float *data = (float*)js_get_blob_data(js, &len, argv[0]);
  if (data == (void*)-1) return JS_EXCEPTION;
  if (len == 0) return js_new_blob_stoned_copy(js, NULL, 0);

  size_t mono_samples = len / sizeof(float);
  size_t stereo_bytes = mono_samples * 2 * sizeof(float);
  float *out = malloc(stereo_bytes);
  if (!out) return JS_ThrowOutOfMemory(js);

  for (size_t i = 0; i < mono_samples; i++) {
    out[i * 2] = data[i];
    out[i * 2 + 1] = data[i];
  }

  JSValue result = js_new_blob_stoned_copy(js, out, stereo_bytes);
  free(out);
  return result;
)

static const JSCFunctionListEntry js_dsp_funcs[] = {
  MIST_FUNC_DEF(dsp, mix_blobs, 2),
  MIST_FUNC_DEF(dsp, lpf, 2),
  MIST_FUNC_DEF(dsp, silence, 2),
  MIST_FUNC_DEF(dsp, mono_to_stereo, 1)
};

CELL_USE_FUNCS(js_dsp_funcs)
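A quick usage sketch of the dsp bindings above from script. The module path ('dsp') and the origin of the input blobs are assumptions; the calls mirror the doc comments in audio/dsp.c:

    var dsp = use('dsp')                                      // module path assumed
    // blob_a and blob_b are stoned blobs of stereo f32 PCM with the same byte length
    var mixed = dsp.mix_blobs([blob_a, blob_b], [1.0, 0.5])   // second blob at half volume
    var soft  = dsp.lpf(mixed, { cutoff: 0.25, channels: 2 }) // one-pole lowpass, normalized cutoff
    var pad   = dsp.silence(48000, 2)                         // 48000 stereo frames of silence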
115
audio/flac.c
115
audio/flac.c
@@ -1,115 +0,0 @@
|
||||
#include "cell.h"
|
||||
|
||||
#include <stdlib.h>
|
||||
|
||||
#define DR_FLAC_IMPLEMENTATION
|
||||
#include "dr_flac.h"
|
||||
|
||||
static int flac_calc_size(drflac *flac, drflac_uint64 frames, size_t *out_bytes)
|
||||
{
|
||||
if (!flac || !out_bytes)
|
||||
return -1;
|
||||
|
||||
if (flac->channels == 0)
|
||||
return -1;
|
||||
|
||||
size_t bytes_per_frame = (size_t)flac->channels * sizeof(drflac_int32);
|
||||
if (frames > SIZE_MAX / bytes_per_frame)
|
||||
return -1;
|
||||
|
||||
*out_bytes = (size_t)(frames * bytes_per_frame);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static JSValue flac_make_info(JSContext *js, drflac *flac)
|
||||
{
|
||||
JSValue obj = JS_NewObject(js);
|
||||
JS_SetPropertyStr(js, obj, "channels", JS_NewInt32(js, flac->channels));
|
||||
JS_SetPropertyStr(js, obj, "sample_rate", JS_NewInt32(js, flac->sampleRate));
|
||||
JS_SetPropertyStr(js, obj, "bits_per_sample", JS_NewInt32(js, flac->bitsPerSample));
|
||||
JS_SetPropertyStr(js, obj, "total_pcm_frames", JS_NewFloat64(js, (double)flac->totalPCMFrameCount));
|
||||
JS_SetPropertyStr(js, obj, "decoded_bytes_per_frame",
|
||||
JS_NewInt32(js, (int)((size_t)flac->channels * sizeof(drflac_int32))));
|
||||
JS_SetPropertyStr(js, obj, "format", JS_NewString(js, "s32"));
|
||||
return obj;
|
||||
}
|
||||
|
||||
JSC_CCALL(flac_info,
|
||||
size_t len;
|
||||
void *data = js_get_blob_data(js, &len, argv[0]);
|
||||
if (data == (void*)-1)
|
||||
return JS_EXCEPTION;
|
||||
|
||||
if (!data)
|
||||
return JS_ThrowReferenceError(js, "invalid FLAC data");
|
||||
|
||||
drflac *flac = drflac_open_memory(data, len, NULL);
|
||||
if (!flac)
|
||||
return JS_ThrowReferenceError(js, "invalid FLAC data");
|
||||
|
||||
JSValue info = flac_make_info(js, flac);
|
||||
drflac_close(flac);
|
||||
return info;
|
||||
)
|
||||
|
||||
JSC_CCALL(flac_decode,
|
||||
size_t len;
|
||||
void *data = js_get_blob_data(js, &len, argv[0]);
|
||||
if (data == (void*)-1)
|
||||
return JS_EXCEPTION;
|
||||
if (!data)
|
||||
return JS_ThrowTypeError(js, "flac.decode expects a blob with data");
|
||||
|
||||
drflac *flac = drflac_open_memory(data, len, NULL);
|
||||
if (!flac)
|
||||
return JS_ThrowReferenceError(js, "invalid FLAC data");
|
||||
|
||||
size_t pcm_bytes;
|
||||
size_t bytes_per_frame = (size_t)flac->channels * sizeof(float);
|
||||
|
||||
if (flac->totalPCMFrameCount > SIZE_MAX / bytes_per_frame) {
|
||||
drflac_close(flac);
|
||||
return JS_ThrowRangeError(js, "FLAC data too large to decode");
|
||||
}
|
||||
pcm_bytes = (size_t)(flac->totalPCMFrameCount * bytes_per_frame);
|
||||
|
||||
float *pcm = NULL;
|
||||
if (pcm_bytes > 0) {
|
||||
pcm = malloc(pcm_bytes);
|
||||
if (!pcm) {
|
||||
drflac_close(flac);
|
||||
return JS_ThrowOutOfMemory(js);
|
||||
}
|
||||
}
|
||||
|
||||
drflac_uint64 frames_read = 0;
|
||||
if (pcm_bytes > 0)
|
||||
frames_read = drflac_read_pcm_frames_f32(flac, flac->totalPCMFrameCount, pcm);
|
||||
|
||||
size_t bytes_read = 0;
|
||||
if (pcm_bytes > 0)
|
||||
bytes_read = (size_t)(frames_read * bytes_per_frame);
|
||||
|
||||
JSValue result = flac_make_info(js, flac);
|
||||
|
||||
// Update format info
|
||||
JS_SetPropertyStr(js, result, "format", JS_NewString(js, "f32"));
|
||||
JS_SetPropertyStr(js, result, "decoded_bytes_per_frame", JS_NewInt32(js, (int)bytes_per_frame));
|
||||
|
||||
JSValue blob = js_new_blob_stoned_copy(js, pcm, bytes_read);
|
||||
JS_SetPropertyStr(js, result, "pcm", blob);
|
||||
free(pcm);
|
||||
drflac_close(flac);
|
||||
return result;
|
||||
)
|
||||
|
||||
static const JSCFunctionListEntry js_flac_funcs[] = {
|
||||
MIST_FUNC_DEF(flac, info, 1),
|
||||
MIST_FUNC_DEF(flac, decode, 1)
|
||||
};
|
||||
|
||||
CELL_USE_FUNCS(js_flac_funcs)
|
||||
|
||||
|
||||
|
||||
|
||||
105
audio/mp3.c
105
audio/mp3.c
@@ -1,105 +0,0 @@
|
||||
#include "cell.h"
|
||||
|
||||
#include <stdlib.h>
|
||||
|
||||
#define DR_MP3_IMPLEMENTATION
|
||||
#include "dr_mp3.h"
|
||||
|
||||
static JSValue mp3_make_info(JSContext *js, drmp3_uint32 channels, drmp3_uint32 sample_rate, drmp3_uint64 frames)
|
||||
{
|
||||
JSValue obj = JS_NewObject(js);
|
||||
JS_SetPropertyStr(js, obj, "channels", JS_NewInt32(js, channels));
|
||||
JS_SetPropertyStr(js, obj, "sample_rate", JS_NewInt32(js, sample_rate));
|
||||
JS_SetPropertyStr(js, obj, "bits_per_sample", JS_NewInt32(js, 16));
|
||||
|
||||
double total_frames = frames == DRMP3_UINT64_MAX ? -1.0 : (double)frames;
|
||||
JS_SetPropertyStr(js, obj, "total_pcm_frames", JS_NewFloat64(js, total_frames));
|
||||
JS_SetPropertyStr(js, obj, "decoded_bytes_per_frame",
|
||||
JS_NewInt32(js, (int)((size_t)channels * sizeof(drmp3_int16))));
|
||||
JS_SetPropertyStr(js, obj, "format", JS_NewString(js, "s16"));
|
||||
return obj;
|
||||
}
|
||||
|
||||
JSC_CCALL(mp3_info,
|
||||
size_t len;
|
||||
void *data = js_get_blob_data(js, &len, argv[0]);
|
||||
if (data == (void*)-1)
|
||||
return JS_EXCEPTION;
|
||||
|
||||
if (!data)
|
||||
return JS_ThrowReferenceError(js, "invalid MP3 data");
|
||||
|
||||
drmp3 mp3;
|
||||
if (!drmp3_init_memory(&mp3, data, len, NULL))
|
||||
return JS_ThrowReferenceError(js, "invalid MP3 data");
|
||||
|
||||
drmp3_uint32 channels = mp3.channels;
|
||||
drmp3_uint32 sample_rate = mp3.sampleRate;
|
||||
drmp3_uint64 frames = mp3.totalPCMFrameCount;
|
||||
if (frames == DRMP3_UINT64_MAX)
|
||||
frames = drmp3_get_pcm_frame_count(&mp3);
|
||||
|
||||
JSValue info = mp3_make_info(js, channels, sample_rate, frames);
|
||||
drmp3_uninit(&mp3);
|
||||
return info;
|
||||
)
|
||||
|
||||
static int mp3_calc_bytes(drmp3_uint32 channels, drmp3_uint64 frames, size_t *out_bytes)
|
||||
{
|
||||
if (!out_bytes || channels == 0)
|
||||
return -1;
|
||||
|
||||
size_t bytes_per_frame = (size_t)channels * sizeof(drmp3_int16);
|
||||
if (frames > SIZE_MAX / bytes_per_frame)
|
||||
return -1;
|
||||
|
||||
*out_bytes = (size_t)(frames * bytes_per_frame);
|
||||
return 0;
|
||||
}
|
||||
|
||||
JSC_CCALL(mp3_decode,
|
||||
size_t len;
|
||||
void *data = js_get_blob_data(js, &len, argv[0]);
|
||||
if (data == (void*)-1)
|
||||
return JS_EXCEPTION;
|
||||
|
||||
if (!data)
|
||||
return JS_ThrowReferenceError(js, "invalid MP3 data");
|
||||
|
||||
drmp3_config config;
|
||||
drmp3_uint64 frames = 0;
|
||||
float *pcm = drmp3_open_memory_and_read_pcm_frames_f32(data, len, &config, &frames, NULL);
|
||||
if (!pcm)
|
||||
return JS_ThrowReferenceError(js, "failed to decode MP3 data");
|
||||
|
||||
size_t bytes_per_frame = (size_t)config.channels * sizeof(float);
|
||||
size_t total_bytes;
|
||||
|
||||
if (frames > SIZE_MAX / bytes_per_frame) {
|
||||
drmp3_free(pcm, NULL);
|
||||
return JS_ThrowRangeError(js, "MP3 output too large");
|
||||
}
|
||||
total_bytes = (size_t)(frames * bytes_per_frame);
|
||||
|
||||
JSValue result = mp3_make_info(js, config.channels, config.sampleRate, frames);
|
||||
|
||||
// Update format info
|
||||
JS_SetPropertyStr(js, result, "format", JS_NewString(js, "f32"));
|
||||
JS_SetPropertyStr(js, result, "decoded_bytes_per_frame", JS_NewInt32(js, (int)bytes_per_frame));
|
||||
|
||||
JSValue blob = js_new_blob_stoned_copy(js, pcm, total_bytes);
|
||||
JS_SetPropertyStr(js, result, "pcm", blob);
|
||||
drmp3_free(pcm, NULL);
|
||||
return result;
|
||||
)
|
||||
|
||||
static const JSCFunctionListEntry js_mp3_funcs[] = {
|
||||
MIST_FUNC_DEF(mp3, info, 1),
|
||||
MIST_FUNC_DEF(mp3, decode, 1)
|
||||
};
|
||||
|
||||
CELL_USE_FUNCS(js_mp3_funcs)
|
||||
|
||||
|
||||
|
||||
|
||||
116
audio/wav.c
116
audio/wav.c
@@ -1,116 +0,0 @@
|
||||
#include "cell.h"
|
||||
|
||||
#include <stdlib.h>
|
||||
|
||||
#define DR_WAV_IMPLEMENTATION
|
||||
#include "dr_wav.h"
|
||||
|
||||
static int wav_calc_size(drwav *wav, drwav_uint64 frames, size_t *out_bytes)
|
||||
{
|
||||
if (!wav || !out_bytes)
|
||||
return -1;
|
||||
|
||||
size_t bytes_per_frame = drwav_get_bytes_per_pcm_frame(wav);
|
||||
if (bytes_per_frame == 0)
|
||||
return -1;
|
||||
|
||||
if (frames > SIZE_MAX / bytes_per_frame)
|
||||
return -1;
|
||||
|
||||
*out_bytes = (size_t)(frames * bytes_per_frame);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static JSValue wav_make_info(JSContext *js, drwav *wav)
|
||||
{
|
||||
JSValue obj = JS_NewObject(js);
|
||||
JS_SetPropertyStr(js, obj, "channels", JS_NewInt32(js, wav->channels));
|
||||
JS_SetPropertyStr(js, obj, "sample_rate", JS_NewInt32(js, wav->sampleRate));
|
||||
JS_SetPropertyStr(js, obj, "bits_per_sample", JS_NewInt32(js, wav->bitsPerSample));
|
||||
JS_SetPropertyStr(js, obj, "format_tag", JS_NewInt32(js, wav->translatedFormatTag));
|
||||
JS_SetPropertyStr(js, obj, "total_pcm_frames", JS_NewFloat64(js, (double)wav->totalPCMFrameCount));
|
||||
JS_SetPropertyStr(js, obj, "bytes_per_frame", JS_NewInt32(js, (int)drwav_get_bytes_per_pcm_frame(wav)));
|
||||
return obj;
|
||||
}
|
||||
|
||||
JSC_CCALL(wav_info,
|
||||
size_t len;
|
||||
void *data = js_get_blob_data(js, &len, argv[0]);
|
||||
if (data == (void*)-1)
|
||||
return JS_EXCEPTION;
|
||||
|
||||
if (!data)
|
||||
return JS_ThrowReferenceError(js, "invalid WAV data");
|
||||
|
||||
drwav wav;
|
||||
if (!drwav_init_memory(&wav, data, len, NULL))
|
||||
return JS_ThrowReferenceError(js, "invalid WAV data");
|
||||
|
||||
JSValue info = wav_make_info(js, &wav);
|
||||
drwav_uninit(&wav);
|
||||
return info;
|
||||
)
|
||||
|
||||
JSC_CCALL(wav_decode,
|
||||
size_t len;
|
||||
void *data = js_get_blob_data(js, &len, argv[0]);
|
||||
if (data == (void*)-1)
|
||||
return JS_EXCEPTION;
|
||||
|
||||
if (!data)
|
||||
return JS_ThrowReferenceError(js, "invalid WAV data");
|
||||
|
||||
drwav wav;
|
||||
if (!drwav_init_memory(&wav, data, len, NULL))
|
||||
return JS_ThrowReferenceError(js, "invalid WAV data");
|
||||
|
||||
size_t pcm_bytes;
|
||||
// Calculate size for float output (channels * sizeof(float))
|
||||
size_t bytes_per_frame = wav.channels * sizeof(float);
|
||||
if (wav.totalPCMFrameCount > SIZE_MAX / bytes_per_frame) {
|
||||
drwav_uninit(&wav);
|
||||
return JS_ThrowRangeError(js, "WAV data too large");
|
||||
}
|
||||
pcm_bytes = (size_t)(wav.totalPCMFrameCount * bytes_per_frame);
|
||||
|
||||
float *pcm = NULL;
|
||||
if (pcm_bytes > 0) {
|
||||
pcm = malloc(pcm_bytes);
|
||||
if (!pcm) {
|
||||
drwav_uninit(&wav);
|
||||
return JS_ThrowOutOfMemory(js);
|
||||
}
|
||||
}
|
||||
|
||||
drwav_uint64 frames_read = 0;
|
||||
if (pcm_bytes > 0)
|
||||
frames_read = drwav_read_pcm_frames_f32(&wav, wav.totalPCMFrameCount, pcm);
|
||||
|
||||
size_t bytes_read = 0;
|
||||
if (pcm_bytes > 0) {
|
||||
bytes_read = (size_t)(frames_read * bytes_per_frame);
|
||||
}
|
||||
|
||||
JSValue result = wav_make_info(js, &wav);
|
||||
// Update format info to reflect f32
|
||||
JS_SetPropertyStr(js, result, "format", JS_NewString(js, "f32"));
|
||||
JS_SetPropertyStr(js, result, "bytes_per_frame", JS_NewInt32(js, (int)bytes_per_frame));
|
||||
|
||||
if (pcm_bytes > 0) {
|
||||
JSValue blob = js_new_blob_stoned_copy(js, pcm, bytes_read);
|
||||
JS_SetPropertyStr(js, result, "pcm", blob);
|
||||
free(pcm);
|
||||
} else {
|
||||
JS_SetPropertyStr(js, result, "pcm", js_new_blob_stoned_copy(js, NULL, 0));
|
||||
}
|
||||
|
||||
drwav_uninit(&wav);
|
||||
return result;
|
||||
)
|
||||
|
||||
static const JSCFunctionListEntry js_wav_funcs[] = {
|
||||
MIST_FUNC_DEF(wav, info, 1),
|
||||
MIST_FUNC_DEF(wav, decode, 1)
|
||||
};
|
||||
|
||||
CELL_USE_FUNCS(js_wav_funcs)
|
||||
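All three decoders (flac, mp3, wav) follow the same contract from script: `info(blob)` returns the stream metadata and `decode(blob)` returns the same object plus a `pcm` stoned blob of f32 samples. A hedged sketch, assuming the module is imported with `use` and that `bytes` already holds the encoded file as a blob:

    var flac = use('flac')        // module path assumed
    var meta = flac.info(bytes)   // { channels, sample_rate, bits_per_sample, total_pcm_frames, ... }
    var song = flac.decode(bytes) // adds format: "f32", decoded_bytes_per_frame, and a pcm blob
    // song.pcm can then be fed to the dsp helpers, e.g. dsp.lpf(song.pcm, { channels: song.channels })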
281
camera.c
281
camera.c
@@ -1,281 +0,0 @@
|
||||
#include "sdl.h"
|
||||
#include "cell.h"
|
||||
#include "stb_ds.h"
|
||||
|
||||
#include <SDL3/SDL.h>
|
||||
|
||||
// SDL Free functions
|
||||
void SDL_Camera_free(JSRuntime *rt, SDL_Camera *cam)
|
||||
{
|
||||
SDL_CloseCamera(cam);
|
||||
}
|
||||
|
||||
// Class definitions for SDL types
|
||||
QJSCLASS(SDL_Camera,)
|
||||
|
||||
|
||||
// CAMERA FUNCTIONS
|
||||
|
||||
// Pixel format enum conversion using the new system
|
||||
ENUM_MAPPING_TABLE(SDL_PixelFormat) = {
|
||||
{SDL_PIXELFORMAT_UNKNOWN, "unknown"},
|
||||
{SDL_PIXELFORMAT_INDEX1LSB, "index1lsb"},
|
||||
{SDL_PIXELFORMAT_INDEX1MSB, "index1msb"},
|
||||
{SDL_PIXELFORMAT_INDEX2LSB, "index2lsb"},
|
||||
{SDL_PIXELFORMAT_INDEX2MSB, "index2msb"},
|
||||
{SDL_PIXELFORMAT_INDEX4LSB, "index4lsb"},
|
||||
{SDL_PIXELFORMAT_INDEX4MSB, "index4msb"},
|
||||
{SDL_PIXELFORMAT_INDEX8, "index8"},
|
||||
{SDL_PIXELFORMAT_RGB332, "rgb332"},
|
||||
{SDL_PIXELFORMAT_XRGB4444, "xrgb4444"},
|
||||
{SDL_PIXELFORMAT_XBGR4444, "xbgr4444"},
|
||||
{SDL_PIXELFORMAT_XRGB1555, "xrgb1555"},
|
||||
{SDL_PIXELFORMAT_XBGR1555, "xbgr1555"},
|
||||
{SDL_PIXELFORMAT_ARGB4444, "argb4444"},
|
||||
{SDL_PIXELFORMAT_RGBA4444, "rgba4444"},
|
||||
{SDL_PIXELFORMAT_ABGR4444, "abgr4444"},
|
||||
{SDL_PIXELFORMAT_BGRA4444, "bgra4444"},
|
||||
{SDL_PIXELFORMAT_ARGB1555, "argb1555"},
|
||||
{SDL_PIXELFORMAT_RGBA5551, "rgba5551"},
|
||||
{SDL_PIXELFORMAT_ABGR1555, "abgr1555"},
|
||||
{SDL_PIXELFORMAT_BGRA5551, "bgra5551"},
|
||||
{SDL_PIXELFORMAT_RGB565, "rgb565"},
|
||||
{SDL_PIXELFORMAT_BGR565, "bgr565"},
|
||||
{SDL_PIXELFORMAT_RGB24, "rgb24"},
|
||||
{SDL_PIXELFORMAT_BGR24, "bgr24"},
|
||||
{SDL_PIXELFORMAT_XRGB8888, "xrgb8888"},
|
||||
{SDL_PIXELFORMAT_RGBX8888, "rgbx8888"},
|
||||
{SDL_PIXELFORMAT_XBGR8888, "xbgr8888"},
|
||||
{SDL_PIXELFORMAT_BGRX8888, "bgrx8888"},
|
||||
{SDL_PIXELFORMAT_ARGB8888, "argb8888"},
|
||||
{SDL_PIXELFORMAT_RGBA8888, "rgba8888"},
|
||||
{SDL_PIXELFORMAT_ABGR8888, "abgr8888"},
|
||||
{SDL_PIXELFORMAT_BGRA8888, "bgra8888"},
|
||||
{SDL_PIXELFORMAT_XRGB2101010, "xrgb2101010"},
|
||||
{SDL_PIXELFORMAT_XBGR2101010, "xbgr2101010"},
|
||||
{SDL_PIXELFORMAT_ARGB2101010, "argb2101010"},
|
||||
{SDL_PIXELFORMAT_ABGR2101010, "abgr2101010"},
|
||||
{SDL_PIXELFORMAT_RGB48, "rgb48"},
|
||||
{SDL_PIXELFORMAT_BGR48, "bgr48"},
|
||||
{SDL_PIXELFORMAT_RGBA64, "rgba64"},
|
||||
{SDL_PIXELFORMAT_ARGB64, "argb64"},
|
||||
{SDL_PIXELFORMAT_BGRA64, "bgra64"},
|
||||
{SDL_PIXELFORMAT_ABGR64, "abgr64"},
|
||||
{SDL_PIXELFORMAT_RGB48_FLOAT, "rgb48_float"},
|
||||
{SDL_PIXELFORMAT_BGR48_FLOAT, "bgr48_float"},
|
||||
{SDL_PIXELFORMAT_RGBA64_FLOAT, "rgba64_float"},
|
||||
{SDL_PIXELFORMAT_ARGB64_FLOAT, "argb64_float"},
|
||||
{SDL_PIXELFORMAT_BGRA64_FLOAT, "bgra64_float"},
|
||||
{SDL_PIXELFORMAT_ABGR64_FLOAT, "abgr64_float"},
|
||||
{SDL_PIXELFORMAT_RGB96_FLOAT, "rgb96_float"},
|
||||
{SDL_PIXELFORMAT_BGR96_FLOAT, "bgr96_float"},
|
||||
{SDL_PIXELFORMAT_RGBA128_FLOAT, "rgba128_float"},
|
||||
{SDL_PIXELFORMAT_ARGB128_FLOAT, "argb128_float"},
|
||||
{SDL_PIXELFORMAT_BGRA128_FLOAT, "bgra128_float"},
|
||||
{SDL_PIXELFORMAT_ABGR128_FLOAT, "abgr128_float"},
|
||||
{SDL_PIXELFORMAT_YV12, "yv12"},
|
||||
{SDL_PIXELFORMAT_IYUV, "iyuv"},
|
||||
{SDL_PIXELFORMAT_YUY2, "yuy2"},
|
||||
{SDL_PIXELFORMAT_UYVY, "uyvy"},
|
||||
{SDL_PIXELFORMAT_YVYU, "yvyu"},
|
||||
{SDL_PIXELFORMAT_NV12, "nv12"},
|
||||
{SDL_PIXELFORMAT_NV21, "nv21"},
|
||||
{SDL_PIXELFORMAT_P010, "p010"},
|
||||
{SDL_PIXELFORMAT_RGBA32, "rgba32"}
|
||||
};
|
||||
JS2ENUM(SDL_PixelFormat)
|
||||
|
||||
static JSValue cameraspec2js(JSContext *js, const SDL_CameraSpec *spec) {
|
||||
JSValue obj = JS_NewObject(js);
|
||||
|
||||
JS_SetPropertyStr(js, obj, "format", SDL_PixelFormat2js(js, spec->format));
|
||||
JS_SetPropertyStr(js, obj, "colorspace", JS_NewInt32(js, spec->colorspace));
|
||||
JS_SetPropertyStr(js, obj, "width", JS_NewInt32(js, spec->width));
|
||||
JS_SetPropertyStr(js, obj, "height", JS_NewInt32(js, spec->height));
|
||||
JS_SetPropertyStr(js, obj, "framerate_numerator", JS_NewInt32(js, spec->framerate_numerator));
|
||||
JS_SetPropertyStr(js, obj, "framerate_denominator", JS_NewInt32(js, spec->framerate_denominator));
|
||||
|
||||
return obj;
|
||||
}
|
||||
|
||||
static SDL_CameraSpec js2cameraspec(JSContext *js, JSValue obj) {
|
||||
SDL_CameraSpec spec = {0};
|
||||
|
||||
JSValue v;
|
||||
|
||||
v = JS_GetPropertyStr(js, obj, "format");
|
||||
if (!JS_IsNull(v)) {
|
||||
spec.format = js2SDL_PixelFormat(js, v);
|
||||
}
|
||||
JS_FreeValue(js, v);
|
||||
|
||||
v = JS_GetPropertyStr(js, obj, "colorspace");
|
||||
if (!JS_IsNull(v)) JS_ToInt32(js, &spec.colorspace, v);
|
||||
JS_FreeValue(js, v);
|
||||
|
||||
v = JS_GetPropertyStr(js, obj, "width");
|
||||
if (!JS_IsNull(v)) JS_ToInt32(js, &spec.width, v);
|
||||
JS_FreeValue(js, v);
|
||||
|
||||
v = JS_GetPropertyStr(js, obj, "height");
|
||||
if (!JS_IsNull(v)) JS_ToInt32(js, &spec.height, v);
|
||||
JS_FreeValue(js, v);
|
||||
|
||||
v = JS_GetPropertyStr(js, obj, "framerate_numerator");
|
||||
if (!JS_IsNull(v)) JS_ToInt32(js, &spec.framerate_numerator, v);
|
||||
JS_FreeValue(js, v);
|
||||
|
||||
v = JS_GetPropertyStr(js, obj, "framerate_denominator");
|
||||
if (!JS_IsNull(v)) JS_ToInt32(js, &spec.framerate_denominator, v);
|
||||
JS_FreeValue(js, v);
|
||||
|
||||
return spec;
|
||||
}
|
||||
|
||||
JSC_CCALL(camera_list,
|
||||
int num;
|
||||
JSValue jsids = JS_NewArray(js);
|
||||
SDL_CameraID *ids = SDL_GetCameras(&num);
|
||||
for (int i = 0; i < num; i++)
|
||||
JS_SetPropertyUint32(js,jsids, i, number2js(js,ids[i]));
|
||||
|
||||
return jsids;
|
||||
)
|
||||
|
||||
JSC_CCALL(camera_open,
|
||||
int id = js2number(js,argv[0]);
|
||||
SDL_CameraSpec *spec_ptr = NULL;
|
||||
SDL_CameraSpec spec;
|
||||
|
||||
// Check if a format spec was provided
|
||||
if (argc > 1 && !JS_IsNull(argv[1])) {
|
||||
spec = js2cameraspec(js, argv[1]);
|
||||
spec_ptr = &spec;
|
||||
}
|
||||
|
||||
SDL_Camera *cam = SDL_OpenCamera(id, spec_ptr);
|
||||
if (!cam) ret = JS_ThrowReferenceError(js, "Could not open camera %d: %s\n", id, SDL_GetError());
|
||||
else
|
||||
ret = SDL_Camera2js(js,cam);
|
||||
)
|
||||
|
||||
JSC_CCALL(camera_name,
|
||||
const char *name = SDL_GetCameraName(js2number(js,argv[0]));
|
||||
if (!name) return JS_ThrowReferenceError(js, "Could not get camera name from id %d.", (int)js2number(js,argv[0]));
|
||||
|
||||
return JS_NewString(js, name);
|
||||
)
|
||||
|
||||
JSC_CCALL(camera_position,
|
||||
SDL_CameraPosition pos = SDL_GetCameraPosition(js2number(js,argv[0]));
|
||||
switch(pos) {
|
||||
case SDL_CAMERA_POSITION_UNKNOWN: return JS_NewString(js,"unknown");
|
||||
case SDL_CAMERA_POSITION_FRONT_FACING: return JS_NewString(js,"front");
|
||||
case SDL_CAMERA_POSITION_BACK_FACING: return JS_NewString(js,"back");
|
||||
}
|
||||
)
|
||||
|
||||
JSC_CCALL(camera_drivers,
|
||||
int num = SDL_GetNumCameraDrivers();
|
||||
JSValue arr = JS_NewArray(js);
|
||||
for (int i = 0; i < num; i++)
|
||||
JS_SetPropertyUint32(js, arr, i, JS_NewString(js, SDL_GetCameraDriver(i)));
|
||||
return arr;
|
||||
)
|
||||
|
||||
JSC_CCALL(camera_supported_formats,
|
||||
SDL_CameraID id = js2number(js,argv[0]);
|
||||
int num;
|
||||
SDL_CameraSpec **specs = SDL_GetCameraSupportedFormats(id, &num);
|
||||
|
||||
if (!specs)
|
||||
return JS_ThrowReferenceError(js, "Could not get supported formats for camera %d: %s", id, SDL_GetError());
|
||||
|
||||
JSValue arr = JS_NewArray(js);
|
||||
for (int i = 0; i < num; i++) {
|
||||
JS_SetPropertyUint32(js, arr, i, cameraspec2js(js, specs[i]));
|
||||
}
|
||||
|
||||
SDL_free(specs);
|
||||
return arr;
|
||||
)
|
||||
|
||||
static const JSCFunctionListEntry js_camera_funcs[] = {
|
||||
MIST_FUNC_DEF(camera, list, 0),
|
||||
MIST_FUNC_DEF(camera, open, 2),
|
||||
MIST_FUNC_DEF(camera, name, 1),
|
||||
MIST_FUNC_DEF(camera, position, 1),
|
||||
MIST_FUNC_DEF(camera, drivers, 0),
|
||||
MIST_FUNC_DEF(camera, supported_formats, 1),
|
||||
};
|
||||
|
||||
JSC_CCALL(camera_capture,
|
||||
SDL_ClearError();
|
||||
SDL_Camera *cam = js2SDL_Camera(js,self);
|
||||
if (!cam) return JS_ThrowReferenceError(js,"Self was not a camera: %s", SDL_GetError());
|
||||
|
||||
SDL_Surface *surf = SDL_AcquireCameraFrame(cam, NULL);
|
||||
if (!surf) {
|
||||
const char *msg = SDL_GetError();
|
||||
if (msg[0] != 0)
|
||||
return JS_ThrowReferenceError(js,"Could not get camera frame: %s", SDL_GetError());
|
||||
else return JS_NULL;
|
||||
}
|
||||
|
||||
// Create a copy of the surface
|
||||
SDL_Surface *newsurf = SDL_CreateSurface(surf->w, surf->h, surf->format);
|
||||
|
||||
if (!newsurf) {
|
||||
SDL_ReleaseCameraFrame(cam, surf);
|
||||
return JS_ThrowReferenceError(js, "Could not create surface: %s", SDL_GetError());
|
||||
}
|
||||
|
||||
// Copy the surface data
|
||||
int result = SDL_BlitSurface(surf, NULL, newsurf, NULL);
|
||||
|
||||
// Release the camera frame
|
||||
SDL_ReleaseCameraFrame(cam, surf);
|
||||
|
||||
if (result != 0) {
|
||||
SDL_DestroySurface(newsurf);
|
||||
return JS_ThrowReferenceError(js, "Could not blit surface: %s", SDL_GetError());
|
||||
}
|
||||
|
||||
return SDL_Surface2js(js,newsurf);
|
||||
)
|
||||
|
||||
JSC_CCALL(camera_get_driver,
|
||||
SDL_Camera *cam = js2SDL_Camera(js,self);
|
||||
if (!cam) return JS_ThrowReferenceError(js,"Self was not a camera: %s", SDL_GetError());
|
||||
|
||||
const char *driver = SDL_GetCurrentCameraDriver();
|
||||
if (!driver) return JS_NULL;
|
||||
|
||||
return JS_NewString(js, driver);
|
||||
)
|
||||
|
||||
JSC_CCALL(camera_get_format,
|
||||
SDL_Camera *cam = js2SDL_Camera(js,self);
|
||||
if (!cam) return JS_ThrowReferenceError(js,"Self was not a camera: %s", SDL_GetError());
|
||||
|
||||
SDL_CameraSpec spec;
|
||||
if (!SDL_GetCameraFormat(cam, &spec))
|
||||
return JS_ThrowReferenceError(js, "Could not get camera format: %s", SDL_GetError());
|
||||
|
||||
return cameraspec2js(js, &spec);
|
||||
)
|
||||
|
||||
static const JSCFunctionListEntry js_SDL_Camera_funcs[] =
|
||||
{
|
||||
MIST_FUNC_DEF(camera, capture, 0),
|
||||
MIST_FUNC_DEF(camera, get_driver, 0),
|
||||
MIST_FUNC_DEF(camera, get_format, 0),
|
||||
};
|
||||
|
||||
CELL_USE_INIT(
|
||||
SDL_Init(SDL_INIT_CAMERA);
|
||||
JSValue mod = JS_NewObject(js);
|
||||
JS_SetPropertyFunctionList(js,mod,js_camera_funcs,countof(js_camera_funcs));
|
||||
QJSCLASSPREP_FUNCS(SDL_Camera)
|
||||
return mod;
|
||||
)
|
||||
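A hedged usage sketch of the camera bindings above. The module path ('camera') is assumed; the calls map to camera_list/open and the SDL_Camera methods registered above:

    var camera = use('camera')              // module path assumed
    var ids = camera.list()
    if (ids.length > 0) {
      var cam = camera.open(ids[0])         // optionally pass a spec, e.g. { width: 640, height: 480 }
      var fmt = cam.get_format()            // { format, width, height, framerate_numerator, ... }
      var frame = cam.capture()             // copy of the current frame as a surface, or null if none is ready yet
    }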
    5  clay.cm
@@ -7,11 +7,10 @@ var layout = use('layout')
var geometry = use('geometry')
var draw = use('draw2d')
var graphics = use('graphics')
var input = use('sdl/input')
var prosperon = use('prosperon')

var CHILDREN = key('children')
var PARENT = key('parent')
var CHILDREN = 'children'
var PARENT = 'parent'

function normalizeSpacing(spacing) {
  if (typeof spacing == 'number') {

@@ -2,7 +2,6 @@
// Separates input concerns from layout/rendering

var geometry = use('geometry')
var point = use('point')
var clay = use('clay')

var clay_input = {}

 1397  cute_aseprite.h   (file diff suppressed because it is too large)
@@ -75,7 +75,6 @@ static const JSCFunctionListEntry js_datastream_funcs[] = {
  CGETSET_ADD(datastream, callback),
};

static void render_frame(plm_t *mpeg, plm_frame_t *frame, void *d) {
  datastream *ds = d;
  if (JS_IsNull(ds->callback)) return;
  152  docs/graphics.md  (new file)
@@ -0,0 +1,152 @@
# Graphics drawing

## Terminology
Texture - a set of bytes on the GPU, not directly accessible
Surface - a set of bytes in RAM, modifiable
rect - a rectangle of {x,y,width,height}
Image - combination of a texture and a rect, where the rect defines the UV coordinates on the texture to draw

# Drawing, cameras, viewports, logical size, and so on

A camera is a view into the game world. A camera can be "rendered", which means it renders the world and what it can see of it. A camera may draw to a surface, or to the main window. Objects in the world render so that an object whose position equals the camera position appears in the center of the screen. HUD functions always render so that [0,0] is the bottom left of the camera's view.

Cameras always draw to their own render target. Then, they draw that render target to the framebuffer.

# COORDINATES
Screen coordinates start in the upper left corner at [0,0] and extend to the bottom right, in pixels. Raw mouse coordinates are given in these.

# RENDERING PIPELINE

In Prosperon, you call graphics rendering functions at well defined hook points. These are interleaved as necessary with predefined created objects, like sprites, 3d world models, and so on.

The engine stores a command buffer. When you issue "draw" commands, they are recorded into the command buffer. These are batched as much as they can be; if there is no significant state change between two draw commands, they can be coalesced into one. Then, for each camera, the draw commands are executed.

# RENDERING COMPONENTS
## MATERIALS
A material defines the inputs to a shader.

## PIPELINES
Pipelines are how the rendering engine is set up. Switching pipelines can be done for special effects.

## SPECIAL EFFECTS
Sometimes you want a special effect. While there are many objects in Prosperon you can create and have the engine handle for you, a special effect typically requires a bit of code.

# LAYERS
All things that draw have a layer. If no layer is set, the implicit layer is "0". Even draw and hud functions have a layer. To draw a draw function on a specific layer, set that function's "layer". i.e.,

this.draw = function() { render.rect(); }
this.draw.layer = -5;

Now that draw function will draw at the -5 layer.

# CAMERAS
Everything is drawn via cameras. Cameras can draw directly to the screen, or they can draw to an offscreen render target. By default, everything is drawn to all cameras. There will eventually be a tag that lets you filter what is drawn to specific cameras.

Cameras have a resolution they draw at, "size".

## TEXTURES

Anatomy of rendering an image: render.image(path)
Path can be a file like "toad"
If this is a gif, this would display the entire range of the animation
It can be a frame of animation, like "frog.0"
If it's an aseprite, it can have multiple animations, like "frog.walk.0"
                                                           file^ frame^ idx

render.image("frog.walk.0")
game.image("frog.walk.0") ==> retrieve

image = {
  texture: "spritesheet.png",
  rect: [x,y,w,h],
  time: 100
},

frames: {
  toad: {
    x: 4,
    y: 5,
    w: 10,
    h: 10
  },
  frog: {
    walk: [
      { texture: spritesheet.png, x: 10, y: 10, w: 6, h: 6, time: 100 },
      { texture: spritesheet.png, x: 16, y: 10, w: 6, h: 6, time: 100 }   <--- two frame walk animation
    ],
  },
},
}

texture frog {
  texture: {"frog.png"},   <--- this is the actual thing to send to the gpu
  x: 0,
  y: 0,
  w: 10,
  h: 10
},

## RENDER MODES
/* rendering modes
  ps1
    gouraud
    diffuse // 16 bit color, 5-5-5
    7 dynamic lights, 1 ambient
    textures are affine
    no vertex skinning
    256x256 texture max (generally 128x128)
    320x240, variable up to 640x480

  n64
    gouraud
    diffuse
    combiner // a secondary texture sometimes used to combine
    7 dynamic lights, 1 ambient
    320x240, or 640x480

  sega saturn
    gouraud
    diffuse
    320x240 or 640x480

  ps2
    phong
    diffuse
    combiner // second texture for modulation of diffuse
    combine_mode // int for how to combine

  dreamcast
    phong
    diffuse
    combiner // second texture; could be an environment map, or emboss bump mapping
    fog
    640x480
    640x448, special mode to 1280x1024

  gamecube
    phong
    diffuse
    +7 textures // wow!
    8 dynamic lights
    640x480
*/

/* meshes
  position (float3)
  color (rgba)
  uv
*/

/* materials, modern pbr
  any object can act as a "material". The engine expects some standardized things:
  diffuse - base color texture
  bump - a normal map for dot3 bump mapping used in phong shading
  height - a grayscale heightmap
  occlusion - ambient occlusion texture
  emission - texture for where the model emits light
  bump2 - a second normal map for detail
  metallic - a metal/smoothness map
  specular - specular map, alternative to the metallic workflow
*/
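To tie the TEXTURES section above together, a minimal sketch of the lookup it describes (the animation names are the illustrative ones used above; the exact return shape is the frame object sketched there):

    var frame = game.image("frog.walk.0")   // retrieve frame 0 of the "walk" tag: { texture, rect, time }
    render.image("frog.walk.0")             // or let render.image do the lookup and draw it directly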
  186  docs/ops.md  (new file)
@@ -0,0 +1,186 @@
# RENDERING PIPELINE
The basic flow for developing graphics here:

1) develop a render graph
2) decide what to draw

The render graph is the "big idea" of how the data flows through a render; inside its execution, you specify "what to draw".

Prosperon provides you with functions to facilitate the creation of rendering pipelines. For example, you could use a "shadow_vol" function to create buffer geometry with shadow volume data.

Unity has a "graphics.rendermesh" function that you can call, and that Unity automatically calls for renderer components. It is the same here. But there are a handful of other types to draw, particularly for 2d.

## 2D
### Anatomy of a 2d renderer
Traditionally, 2d rendering is a mix of tilemaps and sprites. Today, it is still more cost effective to render tilemaps, but we have a lot more flexibility.

NES
The NES had 1 tilemap and up to 8 sprites per scanline.

SNES
Up to 4 tilemap backgrounds, with priority and flipping capability. 32 sprites per scanline, and by setting the priority correctly, they could appear behind background layers.

GB
One background layer, 10 sprites per scanline / 40 per frame.

GBA
Up to 4 layers, sprites with affine transforms!

DS
Up to 4 layers; many sprites; and a 3d layer!

Sega Saturn
This and everything else with generic vertex processing could do as many background layers and sprites as desired. This is what you get with Prosperon on most modern computers. For more limited hardware, your options become limited too!

### Prosperon rendering
Layers
Every drawable 2d thing has a layer. This is an integer that goes from -9223372036854775808 to 9223372036854775807.

!!! On hardware that supports only a limited number of layers, this value must go from 0 to (layer #).

Layer sort
Within a layer, objects are sorted based on a given criterion. By default, this is nothing, and the engine may reorder the draws to optimize for performance. Instead, you can choose to sort by their y axis position, for example.

Parallax
Layers can have a defined parallax value, set at the engine level. Anything on that layer will move with the provided parallax. Each layer has an implicit parallax value of "1", which means it moves "as expected". Below 1 makes it move slower (0 makes it not move at all), 2 makes it move twice as fast, etc.

Tilemaps
These are highly efficient and work just like tilemaps on old consoles. When you submit one of these to draw, Prosperon can efficiently cull what can't be seen by the camera. You can have massive levels with these without any concern for performance. A tilemap is all on its own layer.

Tiles can be flipped, and the entire tilemap can have an affine transformation applied to it.

Sprites each have their own layer and affine transform. Tilemaps are just like a large sprite.

In addition to all of this, objects can have a "draw" event, wherein you can issue direct drawing commands like "render.sprite", "render.text", "render.circle", and so on (see the sketch below). This can be useful for special effects, like multi draw passes (set stencil -> draw -> revert stencil). In this case, it is the draw event itself that carries the layer setting.
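A minimal sketch of such a draw event, using the calls named above (the arguments and object fields are illustrative assumptions; the layer rule follows the Layers description):

    this.draw = function() {
      render.sprite(this.glow)        // direct drawing commands issued from the hook
      render.text("charging...")
    }
    this.draw.layer = 20              // the draw event itself carries the layer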
## 3D
3d models are like 3d sprites. Add them to the world, and then the engine handles drawing them. If you want special effects, a model's "draw" command can be overridden.

As sprites and 3d models are sent to render, they are added to a list, sorted, and then finally rendered.
## THE RENDERER
## Fully scriptable

The render layer is where you do larger scale organizing. For example, for a single outline, you might have an object's draw method be the standard:
- draw the model, setting stencil
- draw a scaled up model with a single color

But, since each object doing this won't merge its outline with the others, you need a larger order solution, wherein you draw *all* models that will be outlined, and then draw *all* scaled up models with a single color. The render graph is how you could do that. The render graph calls draw and render functions; so with a tag system, you can essentially choose to draw whatever you want. You can add new shadow passes; whatever. Of course, Prosperon is packed with some standard render graphs to use right away.

Each graphical drawing command has a specific pipeline. A pipeline is a static object that defines every rendering detail of a drawing command.

A drawing command is composed of:
- a model
- a material
- a pipeline

The engine handles sorting these and rendering them effectively. There are also helper functions, like "render.image", which will in turn create a material and use the correct model (see the sketch below).

You execute a list of drawing commands onto a render target. This might be the computer screen; it might be an offscreen target.

The material's properties are copied into the shader on a given pipeline; materials can also have extra properties like "castshadows", "getshadows", and so on.

An *image* is a struct {
  texture: GPU texture
  rect: UV coordinates
}
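A hedged sketch of the two levels just described, using the batch.draw form defined further below; the mesh, material, and pipeline handles are illustrative names:

    batch.draw(quad_mesh, toad_material, sprite_pipeline)   // explicit: you supply model + material + pipeline
    render.image("toad")                                     // helper: builds the material and picks the model for you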
## 2D drawing commands
The 2d drawing commands ultimately interface with a VERY limited subset of backend knowledge, and so are easily adaptable for a wide variety of hardware and screen APIs.

The basic 2D drawing techniques are:
Sprite - arbitrarily blit a bitmap to the screen with a given affine transformation and color
Tiles - uniform squares in a grid pattern, drawn all on a single layer
Text - generates whatever is needed to display text wrapped in a particular way at a particular coordinate
Particles - a higher order construction
Geometry - programmer-called, for circles or any other arbitrary shape. Might be slow!

## Effects
An "effect" is essentially a sequence of render commands. Typically, a sprite draws itself to a screen. It may have a unique pipeline for a special effect. But it might also have an "effect", which is actually a sequence of draw instructions. An example might be an outline scenario, where the sprite draws a black version of itself scaled 1.1x, and then draws with the typical pipeline afterwards.

## A frame
During a frame, the engine finds everything that needs to be rendered. This includes enabled models, enabled sprites, tilemaps, etc. This also includes programmer directions inside of the draw() and hud() functions.

These high level commands are culled down, accounting for off screen sprites, etc, into a more compact command queue. This command queue is then rendered in whichever way the backend sees fit. Each "command queue" maps roughly onto a "render pass" in Vulkan. Once you submit a command queue, the data is sorted, required data is uploaded, and a render pass draws it to the specified frame.

A command queue is kicked off with a "batch" command.

var batch = render.batch(target, clearcolor) // target is the target buffer to draw onto
target must be known when the batch starts because it must ensure the pipelines fed into it are compatible. If clearcolor is undefined, it does not erase what is present on the target before drawing. To disable depth, simply do not include a depth attachment in the target.

batch.draw(mesh, material, pipeline)
This is the most fundamental draw command you can do. In modern parlance, the pipeline sets up the GPU completely for rendering (stencil, blend, shaders, etc); the material plugs data into the pipeline, via reflection; the mesh determines the geometry that is drawn. A mesh defines everything that's needed to kick off a draw call, including whether the buffers are indexed or not, the number of indices to draw, and the first index to draw from.

batch.viewport()

batch.sprite

batch.text // a text object. faster than doing each letter as a sprite, but less flexible
// etc
batch.render(camera)

Batches can be saved to be executed again and again. So, one set of batches can be created, and then drawn from many cameras' perspectives. batch.render must take a camera (see the sketch below).

Behind the scenes, a batch tries to merge geometry, and reorders draws for minimum pipeline changes.

Each render command can use its own unique pipeline, which entails its own shader, stencil buffer setup, everything. It is extremely flexible. Sprites can have their own pipeline.

ULTIMATELY:::
This is a much more functional style than what is typically presented by graphics APIs. Behind the scenes these are all translated to OpenGL or whatever; being functional at this level helps to optimize.

IMPORTANT NOTE:
Optimization only happens at the object level. If you have two pipelines with the exact same characteristics, they will not be batched. Use the exact same pipeline object to batch.
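A hedged sketch of a saved batch replayed from two cameras, combining the batch calls listed above (the camera handles and sprite argument are illustrative):

    var hud = render.batch(swapchain)     // target must be known when the batch starts
    hud.text("score: 10")
    hud.sprite(cursor)
    hud.end()                             // reorders as needed and uploads data

    hud.render(cam_player1)               // the same saved batch, rendered per camera
    hud.render(cam_player2)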
## SCENARIOS

BLOOM BULLETS
You want to draw a background, some ships, and some bullets that have glow to them. This amounts to four steps:
1) draw the background and ships
2) draw bullets to a texture
3) apply bloom to the bullets
4) draw bullets+bloom over the background and ships

Step 1 and steps 2-3 can be done in parallel. They constitute their own command queues. When both are done, the composite can then happen.

var bg_batch = render.batch(surf1, camera);
bg_batch.draw(background)
bg_batch.draw(ships)
bg_batch.end()

var bullet_batch = render.batch(surf2, camera);
bullet_batch.draw(bullets)
bullet_batch.end()

var bloom = render.batch(surf3, postcam)
bloom.draw(bullet_batch.color, bloom_pipeline)
bloom.end()

var final = render.batch(swapchain)
final.draw(bg_batch.color)
final.draw(bloom.color)
final.end()

When 'batch.end' is called, it reorders as needed, uploads data, and then does a render pass.

3D GAME WITH DIRECTIONAL LIGHT SHADOW MAP

var shadow_batch = render.batch(shadow_surf, dir_T)
shadow_batch.draw(scene, depth_mat) // scene returns a list of non-culled 3d objects; we force it to use depth_mat
shadow_batch.end()

base_mat.shadowmap = shadow_batch.color;

var main_batch = render.batch(swapchain, camera)
main_batch.draw(scene)
main_batch.end()

FIERY LETTERS
This pseudo code draws a "hello world" cutout, with fire behind it, and then draws the game's sprites over that

var main = render.batch(swapchain, 2dcam)
main.draw("hello world", undefined, stencil_pipeline)
main.draw(fire)
main.draw(fullscreen, undefined, stencil_reset)
main.draw(game)
main.end()

@@ -1,7 +1,6 @@
#include "cell.h"
#include "prosperon.h"

#include <SDL3/SDL.h>
#include <math.h>
#include "HandmadeMath.h"
#include "stb_ds.h"
||||
285
graphics.c
285
graphics.c
@@ -1,285 +0,0 @@
|
||||
#include "cell.h"
|
||||
#include "prosperon.h"
|
||||
#include "HandmadeMath.h"
|
||||
|
||||
#define STB_IMAGE_IMPLEMENTATION
|
||||
#define STBI_FAILURE_USERMSG
|
||||
#define STBI_NO_STDIO
|
||||
#include "stb_image.h"
|
||||
|
||||
#define STB_RECT_PACK_IMPLEMENTATION
|
||||
#include "stb_rect_pack.h"
|
||||
|
||||
#define CUTE_ASEPRITE_IMPLEMENTATION
|
||||
#include "cute_aseprite.h"
|
||||
|
||||
// input: (encoded image data of jpg, png, bmp, tiff)
|
||||
JSC_CCALL(os_image_decode,
|
||||
size_t len;
|
||||
void *raw = js_get_blob_data(js, &len, argv[0]);
|
||||
if (raw == (void*)-1) return JS_EXCEPTION;
|
||||
|
||||
int n, width, height;
|
||||
void *data = stbi_load_from_memory(raw, len, &width, &height, &n, 4);
|
||||
|
||||
if (!data)
|
||||
return JS_ThrowReferenceError(js, "no known image type from pixel data: %s", stbi_failure_reason());
|
||||
|
||||
if (width <= 0 || height <= 0) {
|
||||
free(data);
|
||||
return JS_ThrowReferenceError(js, "decoded image has invalid size: %dx%d", width, height);
|
||||
}
|
||||
|
||||
int pitch = width*4;
|
||||
size_t pixels_size = pitch * height;
|
||||
|
||||
// Create JS object with surface data
|
||||
JSValue obj = JS_NewObject(js);
|
||||
JS_SetPropertyStr(js, obj, "width", JS_NewInt32(js, width));
|
||||
JS_SetPropertyStr(js, obj, "height", JS_NewInt32(js, height));
|
||||
JS_SetPropertyStr(js, obj, "format", JS_NewString(js, "rgba32"));
|
||||
JS_SetPropertyStr(js, obj, "pitch", JS_NewInt32(js, pitch));
|
||||
JS_SetPropertyStr(js, obj, "pixels", js_new_blob_stoned_copy(js, data, pixels_size));
|
||||
JS_SetPropertyStr(js, obj, "depth", JS_NewInt32(js, 8));
|
||||
JS_SetPropertyStr(js, obj, "hdr", JS_NewBool(js,0));
|
||||
|
||||
free(data);
|
||||
ret = obj;
|
||||
)
|
||||
|
||||
JSC_CCALL(graphics_hsl_to_rgb,
|
||||
double h, s, l;
|
||||
JS_ToFloat64(js, &h, argv[0]);
|
||||
JS_ToFloat64(js, &s, argv[1]);
|
||||
JS_ToFloat64(js, &l, argv[2]);
|
||||
float c = (1 - fabs(2 * l - 1)) * s;
|
||||
float x = c * (1 - fabs(fmod((h/60),2) - 1));
|
||||
float m = l - c / 2;
|
||||
float r = 0, g = 0, b = 0;
|
||||
|
||||
if (h < 60) { r = c; g = x; }
|
||||
else if (h < 120) { r = x; g = c; }
|
||||
else if (h < 180) { g = c; b = x; }
|
||||
else if (h < 240) { g = x; b = c; }
|
||||
else if (h < 300) { r = x; b = c; }
|
||||
else { r = c; b = x; }
|
||||
|
||||
return color2js(js, (colorf){r+m, g+m, b+m, 1});
|
||||
)
|
||||
|
||||
JSC_CCALL(os_make_line_prim,
|
||||
return JS_NULL;
|
||||
/*
|
||||
JSValue prim = JS_NewObject(js);
|
||||
HMM_Vec2 *v = js2cpvec2arr(js,argv[0]);
|
||||
|
||||
parsl_context *par_ctx = parsl_create_context((parsl_config){
|
||||
.thickness = js2number(js,argv[1]),
|
||||
.flags= PARSL_FLAG_ANNOTATIONS,
|
||||
.u_mode = js2number(js,argv[2])
|
||||
});
|
||||
|
||||
uint16_t spine_lens[] = {arrlen(v)};
|
||||
|
||||
parsl_mesh *m = parsl_mesh_from_lines(par_ctx, (parsl_spine_list){
|
||||
.num_vertices = arrlen(v),
|
||||
.num_spines = 1,
|
||||
.vertices = v,
|
||||
.spine_lengths = spine_lens,
|
||||
.closed = JS_ToBool(js,argv[3])
|
||||
});
|
||||
|
||||
JS_SetPropertyStr(js, prim, "pos", make_gpu_buffer(js,m->positions,sizeof(*m->positions)*m->num_vertices, 0, 2,1,0));
|
||||
|
||||
JS_SetPropertyStr(js, prim, "indices", make_gpu_buffer(js,m->triangle_indices,sizeof(*m->triangle_indices)*m->num_triangles*3, JS_TYPED_ARRAY_UINT32, 1,1,1));
|
||||
|
||||
float uv[m->num_vertices*2];
|
||||
for (int i = 0; i < m->num_vertices; i++) {
|
||||
uv[i*2] = m->annotations[i].u_along_curve;
|
||||
uv[i*2+1] = m->annotations[i].v_across_curve;
|
||||
}
|
||||
|
||||
JS_SetPropertyStr(js, prim, "uv", make_gpu_buffer(js, uv, sizeof(uv), 0,2,1,0));
|
||||
JS_SetPropertyStr(js,prim,"vertices", number2js(js,m->num_vertices));
|
||||
JS_SetPropertyStr(js,prim,"num_indices", number2js(js,m->num_triangles*3));
|
||||
JS_SetPropertyStr(js,prim,"first_index", number2js(js,0));
|
||||
|
||||
parsl_destroy_context(par_ctx);
|
||||
|
||||
return prim;
|
||||
*/
|
||||
)
|
||||
|
||||
JSC_CCALL(os_rectpack,
|
||||
int width = js2number(js,argv[0]);
|
||||
int height = js2number(js,argv[1]);
|
||||
int num = JS_ArrayLength(js,argv[2]);
|
||||
stbrp_context ctx[1];
|
||||
stbrp_rect rects[num];
|
||||
|
||||
for (int i = 0; i < num; i++) {
|
||||
HMM_Vec2 wh = js2vec2(js,JS_GetPropertyUint32(js, argv[2], i));
|
||||
rects[i].w = wh.x;
|
||||
rects[i].h = wh.y;
|
||||
rects[i].id = i;
|
||||
}
|
||||
|
||||
stbrp_node nodes[width];
|
||||
stbrp_init_target(ctx, width, height, nodes, width);
|
||||
int packed = stbrp_pack_rects(ctx, rects, num);
|
||||
|
||||
if (!packed) {
|
||||
return JS_NULL;
|
||||
}
|
||||
|
||||
ret = JS_NewArray(js);
|
||||
for (int i = 0; i < num; i++) {
|
||||
HMM_Vec2 pos;
|
||||
pos.x = rects[i].x;
|
||||
pos.y = rects[i].y;
|
||||
JS_SetPropertyUint32(js, ret, i, vec22js(js,pos));
|
||||
}
|
||||
)
|
||||
|
||||
|
||||
// input: (gif image data)
|
||||
JSC_CCALL(os_make_gif,
|
||||
size_t rawlen;
|
||||
void *raw = js_get_blob_data(js, &rawlen, argv[0]);
|
||||
if (raw == (void*)-1) return JS_EXCEPTION;
|
||||
if (!raw) return JS_ThrowReferenceError(js, "could not load gif from supplied array buffer");
|
||||
|
||||
int n;
|
||||
int frames;
|
||||
int *delays;
|
||||
int width;
|
||||
int height;
|
||||
void *pixels = stbi_load_gif_from_memory(raw, rawlen, &delays, &width, &height, &frames, &n, 4);
|
||||
|
||||
if (!pixels) {
|
||||
return JS_ThrowReferenceError(js, "1decode GIF: %s", stbi_failure_reason());
|
||||
}
|
||||
|
||||
// Always return an array of surfaces, even for single frame
|
||||
JSValue surface_array = JS_NewArray(js);
|
||||
ret = surface_array;
|
||||
|
||||
for (int i = 0; i < frames; i++) {
|
||||
// Create surface data object
|
||||
JSValue surfData = JS_NewObject(js);
|
||||
JS_SetPropertyStr(js, surfData, "width", JS_NewInt32(js, width));
|
||||
JS_SetPropertyStr(js, surfData, "height", JS_NewInt32(js, height));
|
||||
JS_SetPropertyStr(js, surfData, "format", JS_NewString(js, "rgba32"));
|
||||
JS_SetPropertyStr(js, surfData, "pitch", JS_NewInt32(js, width*4));
|
||||
|
||||
void *frame_pixels = (unsigned char*)pixels+(width*height*4*i);
|
||||
JS_SetPropertyStr(js, surfData, "pixels", js_new_blob_stoned_copy(js, frame_pixels, width*height*4));
|
||||
|
||||
// Add time property for animation frames
|
||||
if (frames > 1 && delays) {
|
||||
JS_SetPropertyStr(js, surfData, "time", number2js(js,(float)delays[i]/1000.0));
|
||||
}
|
||||
|
||||
JS_SetPropertyUint32(js, surface_array, i, surfData);
|
||||
}
|
||||
|
||||
CLEANUP:
|
||||
if (delays) free(delays);
|
||||
if (pixels) free(pixels);
|
||||
)
|
||||
|
||||
JSValue aseframe2js(JSContext *js, ase_frame_t aframe)
|
||||
{
|
||||
JSValue frame = JS_NewObject(js);
|
||||
|
||||
// Create surface data object instead of SDL_Surface
|
||||
JSValue surfData = JS_NewObject(js);
|
||||
JS_SetPropertyStr(js, surfData, "width", JS_NewInt32(js, aframe.ase->w));
|
||||
JS_SetPropertyStr(js, surfData, "height", JS_NewInt32(js, aframe.ase->h));
|
||||
JS_SetPropertyStr(js, surfData, "format", JS_NewString(js, "rgba32"));
|
||||
JS_SetPropertyStr(js, surfData, "pitch", JS_NewInt32(js, aframe.ase->w*4));
|
||||
JS_SetPropertyStr(js, surfData, "pixels", js_new_blob_stoned_copy(js, aframe.pixels, aframe.ase->w*aframe.ase->h*4));
|
||||
|
||||
JS_SetPropertyStr(js, frame, "surface", surfData);
|
||||
JS_SetPropertyStr(js, frame, "time", number2js(js,(float)aframe.duration_milliseconds/1000.0));
|
||||
return frame;
|
||||
}
|
||||
|
||||
// input: (aseprite data)
|
||||
JSC_CCALL(os_make_aseprite,
|
||||
size_t rawlen;
|
||||
void *raw = js_get_blob_data(js,&rawlen,argv[0]);
|
||||
if (raw == (void*)-1) return JS_EXCEPTION;
|
||||
if (!raw) return JS_ThrowReferenceError(js, "could not load aseprite from supplied array buffer: no data present");
|
||||
|
||||
ase_t *ase = cute_aseprite_load_from_memory(raw, rawlen, NULL);
|
||||
|
||||
if (!ase)
|
||||
return JS_ThrowReferenceError(js, "could not load aseprite from supplied array buffer: %s", aseprite_GetError());
|
||||
|
||||
if (ase->tag_count == 0) {
|
||||
// we're dealing with a single frame image, or single animation
|
||||
if (ase->frame_count == 1) {
|
||||
JSValue obj = aseframe2js(js,ase->frames[0]);
|
||||
cute_aseprite_free(ase);
|
||||
return obj;
|
||||
} else {
|
||||
// Multiple frames but no tags - create a simple animation
|
||||
JSValue obj = JS_NewObject(js);
|
||||
JSValue frames = JS_NewArray(js);
|
||||
for (int f = 0; f < ase->frame_count; f++) {
|
||||
JSValue frame = aseframe2js(js,ase->frames[f]);
|
||||
JS_SetPropertyUint32(js, frames, f, frame);
|
||||
}
|
||||
JS_SetPropertyStr(js, obj, "frames", frames);
|
||||
JS_SetPropertyStr(js, obj, "loop", JS_NewBool(js, true));
|
||||
ret = obj;
|
||||
cute_aseprite_free(ase);
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
|
||||
JSValue obj = JS_NewObject(js);
|
||||
|
||||
for (int t = 0; t < ase->tag_count; t++) {
|
||||
ase_tag_t tag = ase->tags[t];
|
||||
JSValue anim = JS_NewObject(js);
|
||||
JS_SetPropertyStr(js, anim, "repeat", number2js(js,tag.repeat));
|
||||
switch(tag.loop_animation_direction) {
|
||||
case ASE_ANIMATION_DIRECTION_FORWARDS:
|
||||
JS_SetPropertyStr(js, anim, "loop", JS_NewString(js,"forward"));
|
||||
break;
|
||||
case ASE_ANIMATION_DIRECTION_BACKWORDS:
|
||||
JS_SetPropertyStr(js, anim, "loop", JS_NewString(js,"backward"));
|
||||
break;
|
||||
case ASE_ANIMATION_DIRECTION_PINGPONG:
|
||||
JS_SetPropertyStr(js, anim, "loop", JS_NewString(js,"pingpong"));
|
||||
break;
|
||||
}
|
||||
|
||||
int _frame = 0;
|
||||
JSValue frames = JS_NewArray(js);
|
||||
for (int f = tag.from_frame; f <= tag.to_frame; f++) {
|
||||
JSValue frame = aseframe2js(js,ase->frames[f]);
|
||||
JS_SetPropertyUint32(js, frames, _frame, frame);
|
||||
_frame++;
|
||||
}
|
||||
JS_SetPropertyStr(js, anim, "frames", frames);
|
||||
JS_SetPropertyStr(js, obj, tag.name, anim);
|
||||
}
|
||||
|
||||
ret = obj;
|
||||
|
||||
cute_aseprite_free(ase);
|
||||
)
|
||||

const JSCFunctionListEntry js_graphics_funcs[] = {
  MIST_FUNC_DEF(os, rectpack, 3),
  MIST_FUNC_DEF(os, image_decode, 1),
  MIST_FUNC_DEF(os, make_gif, 1),
  MIST_FUNC_DEF(os, make_aseprite, 1),
  MIST_FUNC_DEF(os, make_line_prim, 5),
  MIST_FUNC_DEF(graphics, hsl_to_rgb, 3),
};

CELL_USE_FUNCS(js_graphics_funcs)
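
For tagged aseprite files, os_make_aseprite above returns an object keyed by tag name rather than a flat array. A minimal sketch of reading one animation out of that result; `anim_data` stands for the decoded value and 'walk' is an illustrative tag name, neither is an engine identifier:

// hedged sketch of the tagged-aseprite shape built by os_make_aseprite
var walk = anim_data.walk
// walk.repeat -> repeat count copied from the tag
// walk.loop   -> 'forward', 'backward' or 'pingpong'
// walk.frames -> array of { surface, time } entries, time in seconds
var first_surface = walk.frames[0].surface  // { width, height, format: 'rgba32', pitch, pixels }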
73
graphics.cm
73
graphics.cm
@@ -1,4 +1,4 @@
|
||||
var graphics = this
|
||||
var graphics = {}
|
||||
|
||||
var io = use('cellfs')
|
||||
var time = use('time')
|
||||
@@ -6,7 +6,10 @@ var res = use('resources')
|
||||
var json = use('json')
|
||||
var os = use('os')
|
||||
var staef = use('staef')
|
||||
var qoi = use('qoi')
|
||||
var qoi = use('image/qoi')
|
||||
var png = use('image/png')
|
||||
var gif = use('image/gif')
|
||||
var aseprite = use('image/aseprite')
|
||||
|
||||
var LASTUSE = "graphics:lastuse"
|
||||
var LOADING = "graphics:loading"
|
||||
@@ -94,16 +97,59 @@ function makeAnim(frames, loop=true){
|
||||
function decode_image(bytes, ext)
|
||||
{
|
||||
switch(ext) {
|
||||
case 'gif': return graphics.make_gif(bytes) // returns array of surfaces
|
||||
case 'gif': return decode_gif(gif.decode(bytes))
|
||||
case 'ase':
|
||||
case 'aseprite': return graphics.make_aseprite(bytes)
|
||||
case 'aseprite': return decode_aseprite(aseprite.decode(bytes))
|
||||
case 'qoi': return qoi.decode(bytes) // returns single surface
|
||||
case 'png': return png.decode(bytes) // returns single surface
|
||||
case 'jpg':
|
||||
case 'jpeg': return png.decode(bytes) // png.decode handles jpg too via stb_image
|
||||
case 'bmp': return png.decode(bytes) // png.decode handles bmp too via stb_image
|
||||
default:
|
||||
// Try QOI first since it's fast to check
|
||||
var qoi_result = qoi.decode(bytes)
|
||||
if (qoi_result) return qoi_result
|
||||
// Fall back to make_texture for other formats
|
||||
return graphics.image_decode(bytes) // returns single surface
|
||||
// Fall back to png decoder for other formats (uses stb_image)
|
||||
return png.decode(bytes)
|
||||
}
|
||||
}
|
||||
|
||||
// Convert gif.decode output to graphics.cm format
|
||||
function decode_gif(decoded) {
|
||||
if (!decoded || !decoded.frames) return null
|
||||
|
||||
// Single frame - return just the surface
|
||||
if (decoded.frame_count == 1) {
|
||||
return decoded.frames[0]
|
||||
}
|
||||
|
||||
// Multiple frames - return array with time property
|
||||
return decoded.frames.map(function(frame) {
|
||||
return {
|
||||
surface: frame,
|
||||
time: (frame.duration || 100) / 1000.0 // convert ms to seconds
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// Convert aseprite.decode output to graphics.cm format
|
||||
function decode_aseprite(decoded) {
|
||||
if (!decoded) return null
|
||||
|
||||
// Single frame - return just the surface
|
||||
if (decoded.frame_count == 1) {
|
||||
return { surface: decoded.frames[0] }
|
||||
}
|
||||
|
||||
// Multiple frames without tags - return as single animation
|
||||
return {
|
||||
frames: decoded.frames.map(function(frame) {
|
||||
return {
|
||||
surface: frame,
|
||||
time: (frame.duration || 100) / 1000.0 // convert ms to seconds
|
||||
}
|
||||
}),
|
||||
loop: true
|
||||
}
|
||||
}
|
||||
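
Taken together, decode_image and the two helpers above can hand back three shapes: a bare surface for still images, an array of { surface, time } entries for animated GIFs, and a { frames, loop } object for untagged aseprite animations. A minimal sketch of telling them apart, reusing the isa() helper the module already leans on; `describe_decoded` is a hypothetical helper, not part of graphics.cm:

function describe_decoded(img) {
  if (isa(img, array)) return 'gif animation with ' + img.length + ' frames'
  if (img.frames) return 'animation with ' + img.frames.length + ' frames, loop=' + img.loop
  if (img.surface) return 'single aseprite frame'
  return 'still image ' + img.width + 'x' + img.height
}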
|
||||
@@ -223,7 +269,7 @@ graphics.texture = function texture(path) {
|
||||
var frameIndex = parts[2]
|
||||
|
||||
// Handle the case where animName is actually a frame index (e.g., "gears:0")
|
||||
if (animName != null && frameIndex == null && !isNaN(number(animName))) {
|
||||
if (animName != null && frameIndex == null && !isa(number(animName), null)) {
|
||||
frameIndex = number(animName)
|
||||
animName = null
|
||||
}
|
||||
@@ -250,7 +296,7 @@ graphics.texture = function texture(path) {
|
||||
// If cached is a single animation (has .frames property)
|
||||
if (cached.frames && isa(cached.frames, array)) {
|
||||
var idx = number(frameIndex)
|
||||
if (isNaN(idx)) return cached
|
||||
if (idx == null) return cached
|
||||
// Wrap the index
|
||||
idx = idx % cached.frames.length
|
||||
return cached.frames[idx].image
|
||||
@@ -366,13 +412,18 @@ var fontcache = {}
|
||||
var datas = []
|
||||
|
||||
graphics.get_font = function get_font(path) {
|
||||
if (typeof path != 'string') return path
|
||||
if (isa(path, object)) return path
|
||||
if (!isa(path, text))
|
||||
throw new Error(`Can't find font with path: ${path}`)
|
||||
|
||||
var parts = path.split('.')
|
||||
var size = 16 // default size
|
||||
if (!isNaN(parts[1])) {
|
||||
parts[1] = number(parts[1])
|
||||
if (parts[1]) {
|
||||
path = parts[0]
|
||||
size = Number(parts[1])
|
||||
size = parts[1]
|
||||
}
|
||||
|
||||
var fullpath = res.find_font(path)
|
||||
if (!fullpath) throw new Error(`Cannot load font ${path}`)
|
||||
|
||||
|
||||
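
The hunk above keeps get_font's dotted-suffix convention: the part after the first '.' is run through number() and, when it yields a usable value, taken as the pixel size; otherwise the size stays at the default of 16. A hedged usage sketch ('monogram' is an illustrative font name; lookup still goes through res.find_font):

var small = graphics.get_font('monogram')     // no numeric suffix, so size defaults to 16
var big   = graphics.get_font('monogram.32')  // parts[1] parses to 32, so size = 32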
@@ -28,7 +28,7 @@ extern "C" {
|
||||
SDL_Window *js2SDL_Window(JSContext *js, JSValue v);
|
||||
SDL_GPUDevice *js2SDL_GPUDevice(JSContext *js, JSValue v);
|
||||
SDL_Texture *js2SDL_Texture(JSContext *js, JSValue v);
|
||||
SDL_GPUCommandBuffer *js2SDL_GPUCommandBuffer(JSContext *js, JSValue v);
|
||||
//SDL_GPUCommandBuffer *js2SDL_GPUCommandBuffer(JSContext *js, JSValue v);
|
||||
SDL_GPURenderPass *js2SDL_GPURenderPass(JSContext *js, JSValue v);
|
||||
double js2number(JSContext *js, JSValue v);
|
||||
}
|
||||
@@ -594,11 +594,11 @@ JSC_CCALL(imgui_newframe,
|
||||
|
||||
JSC_CCALL(imgui_prepare,
|
||||
ImGui::Render();
|
||||
ImGui_ImplSDLGPU3_PrepareDrawData(ImGui::GetDrawData(), js2SDL_GPUCommandBuffer(js,argv[0]));
|
||||
// ImGui_ImplSDLGPU3_PrepareDrawData(ImGui::GetDrawData(), js2SDL_GPUCommandBuffer(js,argv[0]));
|
||||
)
|
||||
|
||||
JSC_CCALL(imgui_endframe,
|
||||
ImGui_ImplSDLGPU3_RenderDrawData(ImGui::GetDrawData(), js2SDL_GPUCommandBuffer(js,argv[0]), js2SDL_GPURenderPass(js,argv[1]));
|
||||
// ImGui_ImplSDLGPU3_RenderDrawData(ImGui::GetDrawData(), js2SDL_GPUCommandBuffer(js,argv[0]), js2SDL_GPURenderPass(js,argv[1]));
|
||||
)
|
||||
|
||||
JSC_CCALL(imgui_wantmouse,
|
||||
@@ -610,7 +610,7 @@ JSC_CCALL(imgui_wantkeys,
|
||||
)
|
||||
|
||||
JSC_CCALL(imgui_init,
|
||||
ImGui::CreateContext();
|
||||
/* ImGui::CreateContext();
|
||||
ImGuiIO& io = ImGui::GetIO();
|
||||
io.ConfigFlags |= ImGuiConfigFlags_NavEnableKeyboard;
|
||||
io.ConfigFlags |= ImGuiConfigFlags_NavEnableGamepad;
|
||||
@@ -629,6 +629,7 @@ JSC_CCALL(imgui_init,
|
||||
|
||||
io.IniFilename = ".prosperon/imgui.ini";
|
||||
ImGui::LoadIniSettingsFromDisk(".prosperon/imgui.ini");
|
||||
*/
|
||||
)
|
||||
|
||||
// ==================== IMPLOT FUNCTIONS ====================
|
||||
|
||||
5
input.cm
Normal file
5
input.cm
Normal file
@@ -0,0 +1,5 @@
var sdl_input = use('sdl3/input')

return {
  gamepad_id_to_type: sdl_input.gamepad_id_to_type,
}
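
The new input.cm above is a thin shim over sdl3/input. A hedged usage sketch; the module name passed to use() is an assumption based on the file name:

var input = use('input')
var kind = input.gamepad_id_to_type(0)  // 0 stands in for a real gamepad id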
717
msf_gif.h
717
msf_gif.h
@@ -1,717 +0,0 @@
|
||||
/*
|
||||
HOW TO USE:
|
||||
|
||||
In exactly one translation unit (.c or .cpp file), #define MSF_GIF_IMPL before including the header, like so:
|
||||
|
||||
#define MSF_GIF_IMPL
|
||||
#include "msf_gif.h"
|
||||
|
||||
Everywhere else, just include the header like normal.
|
||||
|
||||
|
||||
USAGE EXAMPLE:
|
||||
|
||||
int width = 480, height = 320, centisecondsPerFrame = 5, bitDepth = 16;
|
||||
MsfGifState gifState = {};
|
||||
// msf_gif_bgra_flag = true; //optionally, set this flag if your pixels are in BGRA format instead of RGBA
|
||||
// msf_gif_alpha_threshold = 128; //optionally, enable transparency (see function documentation below for details)
|
||||
msf_gif_begin(&gifState, width, height);
|
||||
msf_gif_frame(&gifState, ..., centisecondsPerFrame, bitDepth, width * 4); //frame 1
|
||||
msf_gif_frame(&gifState, ..., centisecondsPerFrame, bitDepth, width * 4); //frame 2
|
||||
msf_gif_frame(&gifState, ..., centisecondsPerFrame, bitDepth, width * 4); //frame 3, etc...
|
||||
MsfGifResult result = msf_gif_end(&gifState);
|
||||
if (result.data) {
|
||||
FILE * fp = fopen("MyGif.gif", "wb");
|
||||
fwrite(result.data, result.dataSize, 1, fp);
|
||||
fclose(fp);
|
||||
}
|
||||
msf_gif_free(result);
|
||||
|
||||
Detailed function documentation can be found in the header section below.
|
||||
|
||||
|
||||
ERROR HANDLING:
|
||||
|
||||
If memory allocation fails, the functions will signal the error via their return values.
|
||||
If one function call fails, the library will free all of its allocations,
|
||||
and all subsequent calls will safely no-op and return 0 until the next call to `msf_gif_begin()`.
|
||||
Therefore, it's safe to check only the return value of `msf_gif_end()`.
|
||||
|
||||
|
||||
REPLACING MALLOC:
|
||||
|
||||
This library uses malloc+realloc+free internally for memory allocation.
|
||||
To facilitate integration with custom memory allocators, these calls go through macros, which can be redefined.
|
||||
The expected function signature equivalents of the macros are as follows:
|
||||
|
||||
void * MSF_GIF_MALLOC(void * context, size_t newSize)
|
||||
void * MSF_GIF_REALLOC(void * context, void * oldMemory, size_t oldSize, size_t newSize)
|
||||
void MSF_GIF_FREE(void * context, void * oldMemory, size_t oldSize)
|
||||
|
||||
If your allocator needs a context pointer, you can set the `customAllocatorContext` field of the MsfGifState struct
|
||||
before calling msf_gif_begin(), and it will be passed to all subsequent allocator macro calls.
|
||||
|
||||
The maximum number of bytes the library will allocate to encode a single gif is bounded by the following formula:
|
||||
`(2 * 1024 * 1024) + (width * height * 8) + ((1024 + width * height * 1.5) * 3 * frameCount)`
|
||||
The peak heap memory usage in bytes, if using a general-purpose heap allocator, is bounded by the following formula:
|
||||
`(2 * 1024 * 1024) + (width * height * 9.5) + 1024 + (16 * frameCount) + (2 * sizeOfResultingGif)
|
||||
|
||||
|
||||
See end of file for license information.
|
||||
*/
|
||||
|
||||
//version 2.2
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
/// HEADER ///
|
||||
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
#ifndef MSF_GIF_H
|
||||
#define MSF_GIF_H
|
||||
|
||||
#include <stdint.h>
|
||||
#include <stddef.h>
|
||||
|
||||
typedef struct {
|
||||
void * data;
|
||||
size_t dataSize;
|
||||
|
||||
size_t allocSize; //internal use
|
||||
void * contextPointer; //internal use
|
||||
} MsfGifResult;
|
||||
|
||||
typedef struct { //internal use
|
||||
uint32_t * pixels;
|
||||
int depth, count, rbits, gbits, bbits;
|
||||
} MsfCookedFrame;
|
||||
|
||||
typedef struct MsfGifBuffer {
|
||||
struct MsfGifBuffer * next;
|
||||
size_t size;
|
||||
uint8_t data[1];
|
||||
} MsfGifBuffer;
|
||||
|
||||
typedef size_t (* MsfGifFileWriteFunc) (const void * buffer, size_t size, size_t count, void * stream);
|
||||
typedef struct {
|
||||
MsfGifFileWriteFunc fileWriteFunc;
|
||||
void * fileWriteData;
|
||||
MsfCookedFrame previousFrame;
|
||||
MsfCookedFrame currentFrame;
|
||||
int16_t * lzwMem;
|
||||
MsfGifBuffer * listHead;
|
||||
MsfGifBuffer * listTail;
|
||||
int width, height;
|
||||
void * customAllocatorContext;
|
||||
int framesSubmitted; //needed for transparency to work correctly (because we reach into the previous frame)
|
||||
} MsfGifState;
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif //__cplusplus
|
||||
|
||||
/**
|
||||
* @param width Image width in pixels.
|
||||
* @param height Image height in pixels.
|
||||
* @return Non-zero on success, 0 on error.
|
||||
*/
|
||||
int msf_gif_begin(MsfGifState * handle, int width, int height);
|
||||
|
||||
/**
|
||||
* @param pixelData Pointer to raw framebuffer data. Rows must be contiguous in memory, in RGBA8 format
|
||||
* (or BGRA8 if you have set `msf_gif_bgra_flag = true`).
|
||||
* Note: This function does NOT free `pixelData`. You must free it yourself afterwards.
|
||||
* @param centiSecondsPerFrame How many hundredths of a second this frame should be displayed for.
|
||||
* Note: This being specified in centiseconds is a limitation of the GIF format.
|
||||
* @param maxBitDepth Limits how many bits per pixel can be used when quantizing the gif.
|
||||
* The actual bit depth chosen for a given frame will be less than or equal to
|
||||
* the supplied maximum, depending on the variety of colors used in the frame.
|
||||
* `maxBitDepth` will be clamped between 1 and 16. The recommended default is 16.
|
||||
* Lowering this value can result in faster exports and smaller gifs,
|
||||
* but the quality may suffer.
|
||||
* Please experiment with this value to find what works best for your application.
|
||||
* @param pitchInBytes The number of bytes from the beginning of one row of pixels to the beginning of the next.
|
||||
* If you want to flip the image, just pass in a negative pitch.
|
||||
* @return Non-zero on success, 0 on error.
|
||||
*/
|
||||
int msf_gif_frame(MsfGifState * handle, uint8_t * pixelData, int centiSecondsPerFame, int maxBitDepth, int pitchInBytes);
|
||||
|
||||
/**
|
||||
* @return A block of memory containing the gif file data, or NULL on error.
|
||||
* You are responsible for freeing this via `msf_gif_free()`.
|
||||
*/
|
||||
MsfGifResult msf_gif_end(MsfGifState * handle);
|
||||
|
||||
/**
|
||||
* @param result The MsfGifResult struct, verbatim as it was returned from `msf_gif_end()`.
|
||||
*/
|
||||
void msf_gif_free(MsfGifResult result);
|
||||
|
||||
//The gif format only supports 1-bit transparency, meaning a pixel will either be fully transparent or fully opaque.
|
||||
//Pixels with an alpha value less than the alpha threshold will be treated as transparent.
|
||||
//To enable exporting transparent gifs, set it to a value between 1 and 255 (inclusive) before calling msf_gif_frame().
|
||||
//Setting it to 0 causes the alpha channel to be ignored. Its initial value is 0.
|
||||
extern int msf_gif_alpha_threshold;
|
||||
|
||||
//Set `msf_gif_bgra_flag = true` before calling `msf_gif_frame()` if your pixels are in BGRA byte order instead of RBGA.
|
||||
extern int msf_gif_bgra_flag;
|
||||
|
||||
|
||||
|
||||
//TO-FILE FUNCTIONS
|
||||
//These functions are equivalent to the ones above, but they write results to a file incrementally,
|
||||
//instead of building a buffer in memory. This can result in lower memory usage when saving large gifs,
|
||||
//because memory usage is bounded by only the size of a single frame, and is not dependent on the number of frames.
|
||||
//There is currently no reason to use these unless you are on a memory-constrained platform.
|
||||
//If in doubt about which API to use, for now you should use the normal (non-file) functions above.
|
||||
//The signature of MsfGifFileWriteFunc matches fwrite for convenience, so that you can use the C file API like so:
|
||||
// FILE * fp = fopen("MyGif.gif", "wb");
|
||||
// msf_gif_begin_to_file(&handle, width, height, (MsfGifFileWriteFunc) fwrite, (void *) fp);
|
||||
// msf_gif_frame_to_file(...)
|
||||
// msf_gif_end_to_file(&handle);
|
||||
// fclose(fp);
|
||||
//If you use a custom file write function, you must take care to return the same values that fwrite() would return.
|
||||
//Note that all three functions will potentially write to the file.
|
||||
int msf_gif_begin_to_file(MsfGifState * handle, int width, int height, MsfGifFileWriteFunc func, void * filePointer);
|
||||
int msf_gif_frame_to_file(MsfGifState * handle, uint8_t * pixelData, int centiSecondsPerFame, int maxBitDepth, int pitchInBytes);
|
||||
int msf_gif_end_to_file(MsfGifState * handle); //returns 0 on error and non-zero on success
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif //__cplusplus
|
||||
|
||||
#endif //MSF_GIF_H
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
/// IMPLEMENTATION ///
|
||||
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
#ifdef MSF_GIF_IMPL
|
||||
#ifndef MSF_GIF_ALREADY_IMPLEMENTED_IN_THIS_TRANSLATION_UNIT
|
||||
#define MSF_GIF_ALREADY_IMPLEMENTED_IN_THIS_TRANSLATION_UNIT
|
||||
|
||||
//ensure the library user has either defined all of malloc/realloc/free, or none
|
||||
#if defined(MSF_GIF_MALLOC) && defined(MSF_GIF_REALLOC) && defined(MSF_GIF_FREE) //ok
|
||||
#elif !defined(MSF_GIF_MALLOC) && !defined(MSF_GIF_REALLOC) && !defined(MSF_GIF_FREE) //ok
|
||||
#else
|
||||
#error "You must either define all of MSF_GIF_MALLOC, MSF_GIF_REALLOC, and MSF_GIF_FREE, or define none of them"
|
||||
#endif
|
||||
|
||||
//provide default allocator definitions that redirect to the standard global allocator
|
||||
#if !defined(MSF_GIF_MALLOC)
|
||||
#include <stdlib.h> //malloc, etc.
|
||||
#define MSF_GIF_MALLOC(contextPointer, newSize) malloc(newSize)
|
||||
#define MSF_GIF_REALLOC(contextPointer, oldMemory, oldSize, newSize) realloc(oldMemory, newSize)
|
||||
#define MSF_GIF_FREE(contextPointer, oldMemory, oldSize) free(oldMemory)
|
||||
#endif
|
||||
|
||||
//instrumentation for capturing profiling traces (useless for the library user, but useful for the library author)
|
||||
#ifdef MSF_GIF_ENABLE_TRACING
|
||||
#define MsfTimeFunc TimeFunc
|
||||
#define MsfTimeLoop TimeLoop
|
||||
#define msf_init_profiling_thread init_profiling_thread
|
||||
#else
|
||||
#define MsfTimeFunc
|
||||
#define MsfTimeLoop(name)
|
||||
#define msf_init_profiling_thread()
|
||||
#endif //MSF_GIF_ENABLE_TRACING
|
||||
|
||||
#include <string.h> //memcpy
|
||||
|
||||
//TODO: use compiler-specific notation to force-inline functions currently marked inline
|
||||
#if defined(__GNUC__) //gcc, clang
|
||||
static inline int msf_bit_log(int i) { return 32 - __builtin_clz(i); }
|
||||
#elif defined(_MSC_VER) //msvc
|
||||
#include <intrin.h>
|
||||
static inline int msf_bit_log(int i) { unsigned long idx; _BitScanReverse(&idx, i); return idx + 1; }
|
||||
#else //fallback implementation for other compilers
|
||||
//from https://stackoverflow.com/a/31718095/3064745 - thanks!
|
||||
static inline int msf_bit_log(int i) {
|
||||
static const int MultiplyDeBruijnBitPosition[32] = {
|
||||
0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30,
|
||||
8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31,
|
||||
};
|
||||
i |= i >> 1;
|
||||
i |= i >> 2;
|
||||
i |= i >> 4;
|
||||
i |= i >> 8;
|
||||
i |= i >> 16;
|
||||
return MultiplyDeBruijnBitPosition[(uint32_t)(i * 0x07C4ACDDU) >> 27] + 1;
|
||||
}
|
||||
#endif
|
||||
static inline int msf_imin(int a, int b) { return a < b? a : b; }
|
||||
static inline int msf_imax(int a, int b) { return b < a? a : b; }
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
/// Frame Cooking ///
|
||||
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
#if (defined (__SSE2__) || defined (_M_X64) || _M_IX86_FP == 2) && !defined(MSF_GIF_NO_SSE2)
|
||||
#include <emmintrin.h>
|
||||
#endif
|
||||
|
||||
int msf_gif_alpha_threshold = 0;
|
||||
int msf_gif_bgra_flag = 0;
|
||||
|
||||
static void msf_cook_frame(MsfCookedFrame * frame, uint8_t * raw, uint8_t * used,
|
||||
int width, int height, int pitch, int depth)
|
||||
{ MsfTimeFunc
|
||||
//bit depth for each channel
|
||||
const static int rdepthsArray[17] = { 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5 };
|
||||
const static int gdepthsArray[17] = { 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6 };
|
||||
const static int bdepthsArray[17] = { 0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5 };
|
||||
//this extra level of indirection looks unnecessary but we need to explicitly decay the arrays to pointers
|
||||
//in order to be able to swap them because of C's annoying not-quite-pointers, not-quite-value-types stack arrays.
|
||||
const int * rdepths = msf_gif_bgra_flag? bdepthsArray : rdepthsArray;
|
||||
const int * gdepths = gdepthsArray;
|
||||
const int * bdepths = msf_gif_bgra_flag? rdepthsArray : bdepthsArray;
|
||||
|
||||
const static int ditherKernel[16] = {
|
||||
0 << 12, 8 << 12, 2 << 12, 10 << 12,
|
||||
12 << 12, 4 << 12, 14 << 12, 6 << 12,
|
||||
3 << 12, 11 << 12, 1 << 12, 9 << 12,
|
||||
15 << 12, 7 << 12, 13 << 12, 5 << 12,
|
||||
};
|
||||
|
||||
uint32_t * cooked = frame->pixels;
|
||||
int count = 0;
|
||||
MsfTimeLoop("do") do {
|
||||
int rbits = rdepths[depth], gbits = gdepths[depth], bbits = bdepths[depth];
|
||||
int paletteSize = (1 << (rbits + gbits + bbits)) + 1;
|
||||
memset(used, 0, paletteSize * sizeof(uint8_t));
|
||||
|
||||
//TODO: document what this math does and why it's correct
|
||||
int rdiff = (1 << (8 - rbits)) - 1;
|
||||
int gdiff = (1 << (8 - gbits)) - 1;
|
||||
int bdiff = (1 << (8 - bbits)) - 1;
|
||||
short rmul = (short) ((255.0f - rdiff) / 255.0f * 257);
|
||||
short gmul = (short) ((255.0f - gdiff) / 255.0f * 257);
|
||||
short bmul = (short) ((255.0f - bdiff) / 255.0f * 257);
|
||||
|
||||
int gmask = ((1 << gbits) - 1) << rbits;
|
||||
int bmask = ((1 << bbits) - 1) << rbits << gbits;
|
||||
|
||||
MsfTimeLoop("cook") for (int y = 0; y < height; ++y) {
|
||||
int x = 0;
|
||||
|
||||
#if (defined (__SSE2__) || defined (_M_X64) || _M_IX86_FP == 2) && !defined(MSF_GIF_NO_SSE2)
|
||||
__m128i k = _mm_loadu_si128((__m128i *) &ditherKernel[(y & 3) * 4]);
|
||||
__m128i k2 = _mm_or_si128(_mm_srli_epi32(k, rbits), _mm_slli_epi32(_mm_srli_epi32(k, bbits), 16));
|
||||
for (; x < width - 3; x += 4) {
|
||||
uint8_t * pixels = &raw[y * pitch + x * 4];
|
||||
__m128i p = _mm_loadu_si128((__m128i *) pixels);
|
||||
|
||||
__m128i rb = _mm_and_si128(p, _mm_set1_epi32(0x00FF00FF));
|
||||
__m128i rb1 = _mm_mullo_epi16(rb, _mm_set_epi16(bmul, rmul, bmul, rmul, bmul, rmul, bmul, rmul));
|
||||
__m128i rb2 = _mm_adds_epu16(rb1, k2);
|
||||
__m128i r3 = _mm_srli_epi32(_mm_and_si128(rb2, _mm_set1_epi32(0x0000FFFF)), 16 - rbits);
|
||||
__m128i b3 = _mm_and_si128(_mm_srli_epi32(rb2, 32 - rbits - gbits - bbits), _mm_set1_epi32(bmask));
|
||||
|
||||
__m128i g = _mm_and_si128(_mm_srli_epi32(p, 8), _mm_set1_epi32(0x000000FF));
|
||||
__m128i g1 = _mm_mullo_epi16(g, _mm_set1_epi32(gmul));
|
||||
__m128i g2 = _mm_adds_epu16(g1, _mm_srli_epi32(k, gbits));
|
||||
__m128i g3 = _mm_and_si128(_mm_srli_epi32(g2, 16 - rbits - gbits), _mm_set1_epi32(gmask));
|
||||
|
||||
__m128i out = _mm_or_si128(_mm_or_si128(r3, g3), b3);
|
||||
|
||||
//mask in transparency based on threshold
|
||||
//NOTE: we can theoretically do a sub instead of srli by doing an unsigned compare via bias
|
||||
// to maybe save a TINY amount of throughput? but lol who cares maybe I'll do it later -m
|
||||
__m128i invAlphaMask = _mm_cmplt_epi32(_mm_srli_epi32(p, 24), _mm_set1_epi32(msf_gif_alpha_threshold));
|
||||
out = _mm_or_si128(_mm_and_si128(invAlphaMask, _mm_set1_epi32(paletteSize - 1)), _mm_andnot_si128(invAlphaMask, out));
|
||||
|
||||
//TODO: does storing this as a __m128i then reading it back as a uint32_t violate strict aliasing?
|
||||
uint32_t * c = &cooked[y * width + x];
|
||||
_mm_storeu_si128((__m128i *) c, out);
|
||||
}
|
||||
#endif
|
||||
|
||||
//scalar cleanup loop
|
||||
for (; x < width; ++x) {
|
||||
uint8_t * p = &raw[y * pitch + x * 4];
|
||||
|
||||
//transparent pixel if alpha is low
|
||||
if (p[3] < msf_gif_alpha_threshold) {
|
||||
cooked[y * width + x] = paletteSize - 1;
|
||||
continue;
|
||||
}
|
||||
|
||||
int dx = x & 3, dy = y & 3;
|
||||
int k = ditherKernel[dy * 4 + dx];
|
||||
cooked[y * width + x] =
|
||||
(msf_imin(65535, p[2] * bmul + (k >> bbits)) >> (16 - rbits - gbits - bbits) & bmask) |
|
||||
(msf_imin(65535, p[1] * gmul + (k >> gbits)) >> (16 - rbits - gbits ) & gmask) |
|
||||
msf_imin(65535, p[0] * rmul + (k >> rbits)) >> (16 - rbits );
|
||||
}
|
||||
}
|
||||
|
||||
count = 0;
|
||||
MsfTimeLoop("mark") for (int i = 0; i < width * height; ++i) {
|
||||
used[cooked[i]] = 1;
|
||||
}
|
||||
|
||||
//count used colors, transparent is ignored
|
||||
MsfTimeLoop("count") for (int j = 0; j < paletteSize - 1; ++j) {
|
||||
count += used[j];
|
||||
}
|
||||
} while (count >= 256 && --depth);
|
||||
|
||||
MsfCookedFrame ret = { cooked, depth, count, rdepths[depth], gdepths[depth], bdepths[depth] };
|
||||
*frame = ret;
|
||||
}
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
/// Frame Compression ///
|
||||
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
static inline void msf_put_code(uint8_t * * writeHead, uint32_t * blockBits, int len, uint32_t code) {
|
||||
//insert new code into block buffer
|
||||
int idx = *blockBits / 8;
|
||||
int bit = *blockBits % 8;
|
||||
(*writeHead)[idx + 0] |= code << bit ;
|
||||
(*writeHead)[idx + 1] |= code >> ( 8 - bit);
|
||||
(*writeHead)[idx + 2] |= code >> (16 - bit);
|
||||
*blockBits += len;
|
||||
|
||||
//prep the next block buffer if the current one is full
|
||||
if (*blockBits >= 256 * 8) {
|
||||
*blockBits -= 255 * 8;
|
||||
(*writeHead) += 256;
|
||||
(*writeHead)[2] = (*writeHead)[1];
|
||||
(*writeHead)[1] = (*writeHead)[0];
|
||||
(*writeHead)[0] = 255;
|
||||
memset((*writeHead) + 4, 0, 256);
|
||||
}
|
||||
}
|
||||
|
||||
typedef struct {
|
||||
int16_t * data;
|
||||
int len;
|
||||
int stride;
|
||||
} MsfStridedList;
|
||||
|
||||
static inline void msf_lzw_reset(MsfStridedList * lzw, int tableSize, int stride) { MsfTimeFunc
|
||||
memset(lzw->data, 0xFF, 4096 * stride * sizeof(int16_t));
|
||||
lzw->len = tableSize + 2;
|
||||
lzw->stride = stride;
|
||||
}
|
||||
|
||||
static MsfGifBuffer * msf_compress_frame(void * allocContext, int width, int height, int centiSeconds,
|
||||
MsfCookedFrame frame, MsfGifState * handle, uint8_t * used, int16_t * lzwMem)
|
||||
{ MsfTimeFunc
|
||||
//NOTE: we reserve enough memory for theoretical the worst case upfront because it's a reasonable amount,
|
||||
// and prevents us from ever having to check size or realloc during compression
|
||||
int maxBufSize = offsetof(MsfGifBuffer, data) + 32 + 256 * 3 + width * height * 3 / 2; //headers + color table + data
|
||||
MsfGifBuffer * buffer = (MsfGifBuffer *) MSF_GIF_MALLOC(allocContext, maxBufSize);
|
||||
if (!buffer) { return NULL; }
|
||||
uint8_t * writeHead = buffer->data;
|
||||
MsfStridedList lzw = { lzwMem };
|
||||
|
||||
//allocate tlb
|
||||
int totalBits = frame.rbits + frame.gbits + frame.bbits;
|
||||
int tlbSize = (1 << totalBits) + 1;
|
||||
uint8_t tlb[(1 << 16) + 1]; //only 64k, so stack allocating is fine
|
||||
|
||||
//generate palette
|
||||
typedef struct { uint8_t r, g, b; } Color3;
|
||||
Color3 table[256] = { {0} };
|
||||
int tableIdx = 1; //we start counting at 1 because 0 is the transparent color
|
||||
//transparent is always last in the table
|
||||
tlb[tlbSize-1] = 0;
|
||||
MsfTimeLoop("table") for (int i = 0; i < tlbSize-1; ++i) {
|
||||
if (used[i]) {
|
||||
tlb[i] = tableIdx;
|
||||
int rmask = (1 << frame.rbits) - 1;
|
||||
int gmask = (1 << frame.gbits) - 1;
|
||||
//isolate components
|
||||
int r = i & rmask;
|
||||
int g = i >> frame.rbits & gmask;
|
||||
int b = i >> (frame.rbits + frame.gbits);
|
||||
//shift into highest bits
|
||||
r <<= 8 - frame.rbits;
|
||||
g <<= 8 - frame.gbits;
|
||||
b <<= 8 - frame.bbits;
|
||||
table[tableIdx].r = r | r >> frame.rbits | r >> (frame.rbits * 2) | r >> (frame.rbits * 3);
|
||||
table[tableIdx].g = g | g >> frame.gbits | g >> (frame.gbits * 2) | g >> (frame.gbits * 3);
|
||||
table[tableIdx].b = b | b >> frame.bbits | b >> (frame.bbits * 2) | b >> (frame.bbits * 3);
|
||||
if (msf_gif_bgra_flag) {
|
||||
uint8_t temp = table[tableIdx].r;
|
||||
table[tableIdx].r = table[tableIdx].b;
|
||||
table[tableIdx].b = temp;
|
||||
}
|
||||
++tableIdx;
|
||||
}
|
||||
}
|
||||
int hasTransparentPixels = used[tlbSize-1];
|
||||
|
||||
//SPEC: "Because of some algorithmic constraints however, black & white images which have one color bit
|
||||
// must be indicated as having a code size of 2."
|
||||
int tableBits = msf_imax(2, msf_bit_log(tableIdx - 1));
|
||||
int tableSize = 1 << tableBits;
|
||||
//NOTE: we don't just compare `depth` field here because it will be wrong for the first frame and we will segfault
|
||||
MsfCookedFrame previous = handle->previousFrame;
|
||||
int hasSamePal = frame.rbits == previous.rbits && frame.gbits == previous.gbits && frame.bbits == previous.bbits;
|
||||
int framesCompatible = hasSamePal && !hasTransparentPixels;
|
||||
|
||||
//NOTE: because __attribute__((__packed__)) is annoyingly compiler-specific, we do this unreadable weirdness
|
||||
char headerBytes[19] = "\x21\xF9\x04\x05\0\0\0\0" "\x2C\0\0\0\0\0\0\0\0\x80";
|
||||
//NOTE: we need to check the frame number because if we reach into the buffer prior to the first frame,
|
||||
// we'll just clobber the file header instead, which is a bug
|
||||
if (hasTransparentPixels && handle->framesSubmitted > 0) {
|
||||
handle->listTail->data[3] = 0x09; //set the previous frame's disposal to background, so transparency is possible
|
||||
}
|
||||
memcpy(&headerBytes[4], ¢iSeconds, 2);
|
||||
memcpy(&headerBytes[13], &width, 2);
|
||||
memcpy(&headerBytes[15], &height, 2);
|
||||
headerBytes[17] |= tableBits - 1;
|
||||
memcpy(writeHead, headerBytes, 18);
|
||||
writeHead += 18;
|
||||
|
||||
//local color table
|
||||
memcpy(writeHead, table, tableSize * sizeof(Color3));
|
||||
writeHead += tableSize * sizeof(Color3);
|
||||
*writeHead++ = tableBits;
|
||||
|
||||
//prep block
|
||||
memset(writeHead, 0, 260);
|
||||
writeHead[0] = 255;
|
||||
uint32_t blockBits = 8; //relative to block.head
|
||||
|
||||
//SPEC: "Encoders should output a Clear code as the first code of each image data stream."
|
||||
msf_lzw_reset(&lzw, tableSize, tableIdx);
|
||||
msf_put_code(&writeHead, &blockBits, msf_bit_log(lzw.len - 1), tableSize);
|
||||
|
||||
int lastCode = framesCompatible && frame.pixels[0] == previous.pixels[0]? 0 : tlb[frame.pixels[0]];
|
||||
MsfTimeLoop("compress") for (int i = 1; i < width * height; ++i) {
|
||||
//PERF: branching vs. branchless version of this line is observed to have no discernable impact on speed
|
||||
int color = framesCompatible && frame.pixels[i] == previous.pixels[i]? 0 : tlb[frame.pixels[i]];
|
||||
int code = (&lzw.data[lastCode * lzw.stride])[color];
|
||||
if (code < 0) {
|
||||
//write to code stream
|
||||
int codeBits = msf_bit_log(lzw.len - 1);
|
||||
msf_put_code(&writeHead, &blockBits, codeBits, lastCode);
|
||||
|
||||
if (lzw.len > 4095) {
|
||||
//reset buffer code table
|
||||
msf_put_code(&writeHead, &blockBits, codeBits, tableSize);
|
||||
msf_lzw_reset(&lzw, tableSize, tableIdx);
|
||||
} else {
|
||||
(&lzw.data[lastCode * lzw.stride])[color] = lzw.len;
|
||||
++lzw.len;
|
||||
}
|
||||
|
||||
lastCode = color;
|
||||
} else {
|
||||
lastCode = code;
|
||||
}
|
||||
}
|
||||
|
||||
//write code for leftover index buffer contents, then the end code
|
||||
msf_put_code(&writeHead, &blockBits, msf_imin(12, msf_bit_log(lzw.len - 1)), lastCode);
|
||||
msf_put_code(&writeHead, &blockBits, msf_imin(12, msf_bit_log(lzw.len)), tableSize + 1);
|
||||
|
||||
//flush remaining data
|
||||
if (blockBits > 8) {
|
||||
int bytes = (blockBits + 7) / 8; //round up
|
||||
writeHead[0] = bytes - 1;
|
||||
writeHead += bytes;
|
||||
}
|
||||
*writeHead++ = 0; //terminating block
|
||||
|
||||
//fill in buffer header and shrink buffer to fit data
|
||||
buffer->next = NULL;
|
||||
buffer->size = writeHead - buffer->data;
|
||||
MsfGifBuffer * moved =
|
||||
(MsfGifBuffer *) MSF_GIF_REALLOC(allocContext, buffer, maxBufSize, offsetof(MsfGifBuffer, data) + buffer->size);
|
||||
if (!moved) { MSF_GIF_FREE(allocContext, buffer, maxBufSize); return NULL; }
|
||||
return moved;
|
||||
}
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
/// To-memory API ///
|
||||
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
static const int lzwAllocSize = 4096 * 256 * sizeof(int16_t);
|
||||
|
||||
//NOTE: by C standard library conventions, freeing NULL should be a no-op,
|
||||
// but just in case the user's custom free doesn't follow that rule, we do null checks on our end as well.
|
||||
static void msf_free_gif_state(MsfGifState * handle) {
|
||||
if (handle->previousFrame.pixels) MSF_GIF_FREE(handle->customAllocatorContext, handle->previousFrame.pixels,
|
||||
handle->width * handle->height * sizeof(uint32_t));
|
||||
if (handle->currentFrame.pixels) MSF_GIF_FREE(handle->customAllocatorContext, handle->currentFrame.pixels,
|
||||
handle->width * handle->height * sizeof(uint32_t));
|
||||
if (handle->lzwMem) MSF_GIF_FREE(handle->customAllocatorContext, handle->lzwMem, lzwAllocSize);
|
||||
for (MsfGifBuffer * node = handle->listHead; node;) {
|
||||
MsfGifBuffer * next = node->next; //NOTE: we have to copy the `next` pointer BEFORE freeing the node holding it
|
||||
MSF_GIF_FREE(handle->customAllocatorContext, node, offsetof(MsfGifBuffer, data) + node->size);
|
||||
node = next;
|
||||
}
|
||||
handle->listHead = NULL; //this implicitly marks the handle as invalid until the next msf_gif_begin() call
|
||||
}
|
||||
|
||||
int msf_gif_begin(MsfGifState * handle, int width, int height) { MsfTimeFunc
|
||||
//NOTE: we cannot stomp the entire struct to zero because we must preserve `customAllocatorContext`.
|
||||
MsfCookedFrame empty = {0}; //god I hate MSVC...
|
||||
handle->previousFrame = empty;
|
||||
handle->currentFrame = empty;
|
||||
handle->width = width;
|
||||
handle->height = height;
|
||||
handle->framesSubmitted = 0;
|
||||
|
||||
//allocate memory for LZW buffer
|
||||
//NOTE: Unfortunately we can't just use stack memory for the LZW table because it's 2MB,
|
||||
// which is more stack space than most operating systems give by default,
|
||||
// and we can't realistically expect users to be willing to override that just to use our library,
|
||||
// so we have to allocate this on the heap.
|
||||
handle->lzwMem = (int16_t *) MSF_GIF_MALLOC(handle->customAllocatorContext, lzwAllocSize);
|
||||
handle->previousFrame.pixels =
|
||||
(uint32_t *) MSF_GIF_MALLOC(handle->customAllocatorContext, handle->width * handle->height * sizeof(uint32_t));
|
||||
handle->currentFrame.pixels =
|
||||
(uint32_t *) MSF_GIF_MALLOC(handle->customAllocatorContext, handle->width * handle->height * sizeof(uint32_t));
|
||||
|
||||
//setup header buffer header (lol)
|
||||
handle->listHead = (MsfGifBuffer *) MSF_GIF_MALLOC(handle->customAllocatorContext, offsetof(MsfGifBuffer, data) + 32);
|
||||
if (!handle->listHead || !handle->lzwMem || !handle->previousFrame.pixels || !handle->currentFrame.pixels) {
|
||||
msf_free_gif_state(handle);
|
||||
return 0;
|
||||
}
|
||||
handle->listTail = handle->listHead;
|
||||
handle->listHead->next = NULL;
|
||||
handle->listHead->size = 32;
|
||||
|
||||
//NOTE: because __attribute__((__packed__)) is annoyingly compiler-specific, we do this unreadable weirdness
|
||||
char headerBytes[33] = "GIF89a\0\0\0\0\x70\0\0" "\x21\xFF\x0BNETSCAPE2.0\x03\x01\0\0\0";
|
||||
memcpy(&headerBytes[6], &width, 2);
|
||||
memcpy(&headerBytes[8], &height, 2);
|
||||
memcpy(handle->listHead->data, headerBytes, 32);
|
||||
return 1;
|
||||
}
|
||||
|
||||
int msf_gif_frame(MsfGifState * handle, uint8_t * pixelData, int centiSecondsPerFame, int maxBitDepth, int pitchInBytes)
|
||||
{ MsfTimeFunc
|
||||
if (!handle->listHead) { return 0; }
|
||||
|
||||
maxBitDepth = msf_imax(1, msf_imin(16, maxBitDepth));
|
||||
if (pitchInBytes == 0) pitchInBytes = handle->width * 4;
|
||||
if (pitchInBytes < 0) pixelData -= pitchInBytes * (handle->height - 1);
|
||||
|
||||
uint8_t used[(1 << 16) + 1]; //only 64k, so stack allocating is fine
|
||||
msf_cook_frame(&handle->currentFrame, pixelData, used, handle->width, handle->height, pitchInBytes,
|
||||
msf_imin(maxBitDepth, handle->previousFrame.depth + 160 / msf_imax(1, handle->previousFrame.count)));
|
||||
|
||||
MsfGifBuffer * buffer = msf_compress_frame(handle->customAllocatorContext, handle->width, handle->height,
|
||||
centiSecondsPerFame, handle->currentFrame, handle, used, handle->lzwMem);
|
||||
if (!buffer) { msf_free_gif_state(handle); return 0; }
|
||||
handle->listTail->next = buffer;
|
||||
handle->listTail = buffer;
|
||||
|
||||
//swap current and previous frames
|
||||
MsfCookedFrame tmp = handle->previousFrame;
|
||||
handle->previousFrame = handle->currentFrame;
|
||||
handle->currentFrame = tmp;
|
||||
|
||||
handle->framesSubmitted += 1;
|
||||
return 1;
|
||||
}
|
||||
|
||||
MsfGifResult msf_gif_end(MsfGifState * handle) { MsfTimeFunc
|
||||
if (!handle->listHead) { MsfGifResult empty = {0}; return empty; }
|
||||
|
||||
//first pass: determine total size
|
||||
size_t total = 1; //1 byte for trailing marker
|
||||
for (MsfGifBuffer * node = handle->listHead; node; node = node->next) { total += node->size; }
|
||||
|
||||
//second pass: write data
|
||||
uint8_t * buffer = (uint8_t *) MSF_GIF_MALLOC(handle->customAllocatorContext, total);
|
||||
if (buffer) {
|
||||
uint8_t * writeHead = buffer;
|
||||
for (MsfGifBuffer * node = handle->listHead; node; node = node->next) {
|
||||
memcpy(writeHead, node->data, node->size);
|
||||
writeHead += node->size;
|
||||
}
|
||||
*writeHead++ = 0x3B;
|
||||
}
|
||||
|
||||
//third pass: free buffers
|
||||
msf_free_gif_state(handle);
|
||||
|
||||
MsfGifResult ret = { buffer, total, total, handle->customAllocatorContext };
|
||||
return ret;
|
||||
}
|
||||
|
||||
void msf_gif_free(MsfGifResult result) { MsfTimeFunc
|
||||
if (result.data) { MSF_GIF_FREE(result.contextPointer, result.data, result.allocSize); }
|
||||
}
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
/// To-file API ///
|
||||
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
int msf_gif_begin_to_file(MsfGifState * handle, int width, int height, MsfGifFileWriteFunc func, void * filePointer) {
|
||||
handle->fileWriteFunc = func;
|
||||
handle->fileWriteData = filePointer;
|
||||
return msf_gif_begin(handle, width, height);
|
||||
}
|
||||
|
||||
int msf_gif_frame_to_file(MsfGifState * handle, uint8_t * pixelData, int centiSecondsPerFame, int maxBitDepth, int pitchInBytes) {
|
||||
if (!msf_gif_frame(handle, pixelData, centiSecondsPerFame, maxBitDepth, pitchInBytes)) { return 0; }
|
||||
|
||||
//NOTE: this is a somewhat hacky implementation which is not perfectly efficient, but it's good enough for now
|
||||
MsfGifBuffer * head = handle->listHead;
|
||||
if (!handle->fileWriteFunc(head->data, head->size, 1, handle->fileWriteData)) { msf_free_gif_state(handle); return 0; }
|
||||
handle->listHead = head->next;
|
||||
MSF_GIF_FREE(handle->customAllocatorContext, head, offsetof(MsfGifBuffer, data) + head->size);
|
||||
return 1;
|
||||
}
|
||||
|
||||
int msf_gif_end_to_file(MsfGifState * handle) {
|
||||
//NOTE: this is a somewhat hacky implementation which is not perfectly efficient, but it's good enough for now
|
||||
MsfGifResult result = msf_gif_end(handle);
|
||||
int ret = (int) handle->fileWriteFunc(result.data, result.dataSize, 1, handle->fileWriteData);
|
||||
msf_gif_free(result);
|
||||
return ret;
|
||||
}
|
||||
|
||||
#endif //MSF_GIF_ALREADY_IMPLEMENTED_IN_THIS_TRANSLATION_UNIT
|
||||
#endif //MSF_GIF_IMPL
|
||||
|
||||
/*
|
||||
------------------------------------------------------------------------------
|
||||
This software is available under 2 licenses -- choose whichever you prefer.
|
||||
------------------------------------------------------------------------------
|
||||
ALTERNATIVE A - MIT License
|
||||
Copyright (c) 2021 Miles Fogle
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy of
|
||||
this software and associated documentation files (the "Software"), to deal in
|
||||
the Software without restriction, including without limitation the rights to
|
||||
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
|
||||
of the Software, and to permit persons to whom the Software is furnished to do
|
||||
so, subject to the following conditions:
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
------------------------------------------------------------------------------
|
||||
ALTERNATIVE B - Public Domain (www.unlicense.org)
|
||||
This is free and unencumbered software released into the public domain.
|
||||
Anyone is free to copy, modify, publish, use, compile, sell, or distribute this
|
||||
software, either in source code form or as a compiled binary, for any purpose,
|
||||
commercial or non-commercial, and by any means.
|
||||
In jurisdictions that recognize copyright laws, the author or authors of this
|
||||
software dedicate any and all copyright interest in the software to the public
|
||||
domain. We make this dedication for the benefit of the public at large and to
|
||||
the detriment of our heirs and successors. We intend this dedication to be an
|
||||
overt act of relinquishment in perpetuity of all present and future rights to
|
||||
this software under copyright law.
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
|
||||
ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
|
||||
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
------------------------------------------------------------------------------
|
||||
*/
|
||||
1459
nanosvgrast.h
1459
nanosvgrast.h
File diff suppressed because it is too large
Load Diff
13
prosperon.cm
13
prosperon.cm
@@ -2,9 +2,9 @@ var prosperon = {}
|
||||
|
||||
// This file is hard coded for the SDL GPU case
|
||||
|
||||
var video = use('sdl/video')
|
||||
var surface = use('sdl/surface')
|
||||
var sdl_gpu = use('sdl/gpu')
|
||||
var video = use('sdl3/video')
|
||||
var surface = use('sdl3/surface')
|
||||
var sdl_gpu = use('sdl3/gpu')
|
||||
var io = use('cellfs')
|
||||
var geometry = use('geometry')
|
||||
var blob = use('blob')
|
||||
@@ -489,7 +489,7 @@ var time = use('time')
|
||||
var tilemap = use('tilemap')
|
||||
|
||||
var res = use('resources')
|
||||
var input = use('sdl/input')
|
||||
var input = use('sdl3/input')
|
||||
|
||||
var graphics = use('graphics')
|
||||
|
||||
@@ -858,6 +858,7 @@ cmd_fns.geometry = function(cmd)
|
||||
if (typeof cmd.image == 'object') {
|
||||
img = cmd.image
|
||||
} else {
|
||||
if (!cmd.image) return
|
||||
img = graphics.texture(cmd.image)
|
||||
if (!img) return
|
||||
}
|
||||
@@ -969,7 +970,7 @@ prosperon.create_batch = function create_batch(draw_cmds, done) {
|
||||
|
||||
copy_pass.end();
|
||||
|
||||
imgui.prepare(render_queue)
|
||||
// imgui.prepare(render_queue)
|
||||
|
||||
for (var g of new_tex)
|
||||
render_queue.generate_mipmaps(g)
|
||||
@@ -1163,7 +1164,7 @@ prosperon.create_batch = function create_batch(draw_cmds, done) {
|
||||
)
|
||||
}
|
||||
|
||||
imgui.endframe(render_queue, render_pass)
|
||||
// imgui.endframe(render_queue, render_pass)
|
||||
render_pass.end()
|
||||
|
||||
render_queue.submit()
|
||||
|
||||
742
qoa.h
742
qoa.h
@@ -1,742 +0,0 @@
|
||||
/*
|
||||
|
||||
Copyright (c) 2023, Dominic Szablewski - https://phoboslab.org
|
||||
SPDX-License-Identifier: MIT
|
||||
|
||||
QOA - The "Quite OK Audio" format for fast, lossy audio compression
|
||||
|
||||
|
||||
-- Data Format
|
||||
|
||||
QOA encodes pulse-code modulated (PCM) audio data with up to 255 channels,
|
||||
sample rates from 1 up to 16777215 hertz and a bit depth of 16 bits.
|
||||
|
||||
The compression method employed in QOA is lossy; it discards some information
|
||||
from the uncompressed PCM data. For many types of audio signals this compression
|
||||
is "transparent", i.e. the difference from the original file is often not
|
||||
audible.
|
||||
|
||||
QOA encodes 20 samples of 16 bit PCM data into slices of 64 bits. A single
|
||||
sample therefore requires 3.2 bits of storage space, resulting in a 5x
|
||||
compression (16 / 3.2).
|
||||
|
||||
A QOA file consists of an 8 byte file header, followed by a number of frames.
|
||||
Each frame contains an 8 byte frame header, the current 16 byte en-/decoder
|
||||
state per channel and 256 slices per channel. Each slice is 8 bytes wide and
|
||||
encodes 20 samples of audio data.
|
||||
|
||||
All values, including the slices, are big endian. The file layout is as follows:
|
||||
|
||||
struct {
|
||||
struct {
|
||||
char magic[4]; // magic bytes "qoaf"
|
||||
uint32_t samples; // samples per channel in this file
|
||||
} file_header;
|
||||
|
||||
struct {
|
||||
struct {
|
||||
uint8_t num_channels; // no. of channels
|
||||
uint24_t samplerate; // samplerate in hz
|
||||
uint16_t fsamples; // samples per channel in this frame
|
||||
uint16_t fsize; // frame size (includes this header)
|
||||
} frame_header;
|
||||
|
||||
struct {
|
||||
int16_t history[4]; // most recent last
|
||||
int16_t weights[4]; // most recent last
|
||||
} lms_state[num_channels];
|
||||
|
||||
qoa_slice_t slices[256][num_channels];
|
||||
|
||||
} frames[ceil(samples / (256 * 20))];
|
||||
} qoa_file_t;
|
||||
|
||||
Each `qoa_slice_t` contains a quantized scalefactor `sf_quant` and 20 quantized
|
||||
residuals `qrNN`:
|
||||
|
||||
.- QOA_SLICE -- 64 bits, 20 samples --------------------------/ /------------.
|
||||
| Byte[0] | Byte[1] | Byte[2] \ \ Byte[7] |
|
||||
| 7 6 5 4 3 2 1 0 | 7 6 5 4 3 2 1 0 | 7 6 5 / / 2 1 0 |
|
||||
|------------+--------+--------+--------+---------+---------+-\ \--+---------|
|
||||
| sf_quant | qr00 | qr01 | qr02 | qr03 | qr04 | / / | qr19 |
|
||||
`-------------------------------------------------------------\ \------------`
|
||||
|
||||
Each frame except the last must contain exactly 256 slices per channel. The last
|
||||
frame may contain between 1 .. 256 (inclusive) slices per channel. The last
|
||||
slice (for each channel) in the last frame may contain less than 20 samples; the
|
||||
slice still must be 8 bytes wide, with the unused samples zeroed out.
|
||||
|
||||
Channels are interleaved per slice. E.g. for 2 channel stereo:
|
||||
slice[0] = L, slice[1] = R, slice[2] = L, slice[3] = R ...
|
||||
|
||||
A valid QOA file or stream must have at least one frame. Each frame must contain
|
||||
at least one channel and one sample with a samplerate between 1 .. 16777215
|
||||
(inclusive).
|
||||
|
||||
If the total number of samples is not known by the encoder, the samples in the
|
||||
file header may be set to 0x00000000 to indicate that the encoder is
|
||||
"streaming". In a streaming context, the samplerate and number of channels may
|
||||
differ from frame to frame. For static files (those with samples set to a
|
||||
non-zero value), each frame must have the same number of channels and same
|
||||
samplerate.
|
||||
|
||||
Note that this implementation of QOA only handles files with a known total
|
||||
number of samples.
|
||||
|
||||
A decoder should support at least 8 channels. The channel layout for channel
|
||||
counts 1 .. 8 is:
|
||||
|
||||
1. Mono
|
||||
2. L, R
|
||||
3. L, R, C
|
||||
4. FL, FR, B/SL, B/SR
|
||||
5. FL, FR, C, B/SL, B/SR
|
||||
6. FL, FR, C, LFE, B/SL, B/SR
|
||||
7. FL, FR, C, LFE, B, SL, SR
|
||||
8. FL, FR, C, LFE, BL, BR, SL, SR
|
||||
|
||||
QOA predicts each audio sample based on the previously decoded ones using a
|
||||
"Sign-Sign Least Mean Squares Filter" (LMS). This prediction plus the
|
||||
dequantized residual forms the final output sample.
|
||||
|
||||
*/
|
||||
|
||||
|
||||
|
||||
/* -----------------------------------------------------------------------------
|
||||
Header - Public functions */
|
||||
|
||||
#ifndef QOA_H
|
||||
#define QOA_H
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
#define QOA_MIN_FILESIZE 16
|
||||
#define QOA_MAX_CHANNELS 8
|
||||
|
||||
#define QOA_SLICE_LEN 20
|
||||
#define QOA_SLICES_PER_FRAME 256
|
||||
#define QOA_FRAME_LEN (QOA_SLICES_PER_FRAME * QOA_SLICE_LEN)
|
||||
#define QOA_LMS_LEN 4
|
||||
#define QOA_MAGIC 0x716f6166 /* 'qoaf' */
|
||||
|
||||
#define QOA_FRAME_SIZE(channels, slices) \
|
||||
(8 + QOA_LMS_LEN * 4 * channels + 8 * slices * channels)
|
||||
|
||||
typedef struct {
|
||||
int history[QOA_LMS_LEN];
|
||||
int weights[QOA_LMS_LEN];
|
||||
} qoa_lms_t;
|
||||
|
||||
typedef struct {
|
||||
unsigned int channels;
|
||||
unsigned int samplerate;
|
||||
unsigned int samples;
|
||||
qoa_lms_t lms[QOA_MAX_CHANNELS];
|
||||
#ifdef QOA_RECORD_TOTAL_ERROR
|
||||
double error;
|
||||
#endif
|
||||
} qoa_desc;
|
||||
|
||||
unsigned int qoa_encode_header(qoa_desc *qoa, unsigned char *bytes);
|
||||
unsigned int qoa_encode_frame(const short *sample_data, qoa_desc *qoa, unsigned int frame_len, unsigned char *bytes);
|
||||
void *qoa_encode(const short *sample_data, qoa_desc *qoa, unsigned int *out_len);
|
||||
|
||||
unsigned int qoa_max_frame_size(qoa_desc *qoa);
|
||||
unsigned int qoa_decode_header(const unsigned char *bytes, int size, qoa_desc *qoa);
|
||||
unsigned int qoa_decode_frame(const unsigned char *bytes, unsigned int size, qoa_desc *qoa, short *sample_data, unsigned int *frame_len);
|
||||
short *qoa_decode(const unsigned char *bytes, int size, qoa_desc *file);
|
||||
|
||||
#ifndef QOA_NO_STDIO
|
||||
|
||||
int qoa_write(const char *filename, const short *sample_data, qoa_desc *qoa);
|
||||
void *qoa_read(const char *filename, qoa_desc *qoa);
|
||||
|
||||
#endif /* QOA_NO_STDIO */
|
||||
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
#endif /* QOA_H */
|
||||
|
||||
|
||||
/* -----------------------------------------------------------------------------
|
||||
Implementation */
|
||||
|
||||
#ifdef QOA_IMPLEMENTATION
|
||||
#include <stdlib.h>
|
||||
|
||||
#ifndef QOA_MALLOC
|
||||
#define QOA_MALLOC(sz) malloc(sz)
|
||||
#define QOA_FREE(p) free(p)
|
||||
#endif
|
||||
|
||||
typedef unsigned long long qoa_uint64_t;
|
||||
|
||||
|
||||
/* The quant_tab provides an index into the dequant_tab for residuals in the
|
||||
range of -8 .. 8. It maps this range to just 3bits and becomes less accurate at
|
||||
the higher end. Note that the residual zero is identical to the lowest positive
|
||||
value. This is mostly fine, since the qoa_div() function always rounds away
|
||||
from zero. */
|
||||
|
||||
static const int qoa_quant_tab[17] = {
|
||||
7, 7, 7, 5, 5, 3, 3, 1, /* -8..-1 */
|
||||
0, /* 0 */
|
||||
0, 2, 2, 4, 4, 6, 6, 6 /* 1.. 8 */
|
||||
};
|
||||
|
||||
|
||||
/* We have 16 different scalefactors. Like the quantized residuals these become
|
||||
less accurate at the higher end. In theory, the highest scalefactor that we
|
||||
would need to encode the highest 16bit residual is (2**16)/8 = 8192. However we
|
||||
rely on the LMS filter to predict samples accurately enough that a maximum
|
||||
residual of one quarter of the 16 bit range is sufficient. I.e. with the
|
||||
scalefactor 2048 times the quant range of 8 we can encode residuals up to 2**14.
|
||||
|
||||
The scalefactor values are computed as:
|
||||
scalefactor_tab[s] <- round(pow(s + 1, 2.75)) */
|
||||
|
||||
static const int qoa_scalefactor_tab[16] = {
|
||||
1, 7, 21, 45, 84, 138, 211, 304, 421, 562, 731, 928, 1157, 1419, 1715, 2048
|
||||
};
|
||||
|
||||
|
||||
/* The reciprocal_tab maps each of the 16 scalefactors to their rounded
|
||||
reciprocals 1/scalefactor. This allows us to calculate the scaled residuals in
|
||||
the encoder with just one multiplication instead of an expensive division. We
|
||||
do this in .16 fixed point with integers, instead of floats.
|
||||
|
||||
The reciprocal_tab is computed as:
|
||||
reciprocal_tab[s] <- ((1<<16) + scalefactor_tab[s] - 1) / scalefactor_tab[s] */
|
||||
|
||||
static const int qoa_reciprocal_tab[16] = {
|
||||
65536, 9363, 3121, 1457, 781, 475, 311, 216, 156, 117, 90, 71, 57, 47, 39, 32
|
||||
};
|
||||
|
||||
|
||||
/* The dequant_tab maps each of the scalefactors and quantized residuals to
|
||||
their unscaled & dequantized version.
|
||||
|
||||
Since qoa_div rounds away from the zero, the smallest entries are mapped to 3/4
|
||||
instead of 1. The dequant_tab assumes the following dequantized values for each
|
||||
of the quant_tab indices and is computed as:
|
||||
float dqt[8] = {0.75, -0.75, 2.5, -2.5, 4.5, -4.5, 7, -7};
|
||||
dequant_tab[s][q] <- round_ties_away_from_zero(scalefactor_tab[s] * dqt[q])
|
||||
|
||||
The rounding employed here is "to nearest, ties away from zero", i.e. positive
|
||||
and negative values are treated symmetrically.
|
||||
*/
|
||||
|
||||
static const int qoa_dequant_tab[16][8] = {
|
||||
{ 1, -1, 3, -3, 5, -5, 7, -7},
|
||||
{ 5, -5, 18, -18, 32, -32, 49, -49},
|
||||
{ 16, -16, 53, -53, 95, -95, 147, -147},
|
||||
{ 34, -34, 113, -113, 203, -203, 315, -315},
|
||||
{ 63, -63, 210, -210, 378, -378, 588, -588},
|
||||
{ 104, -104, 345, -345, 621, -621, 966, -966},
|
||||
{ 158, -158, 528, -528, 950, -950, 1477, -1477},
|
||||
{ 228, -228, 760, -760, 1368, -1368, 2128, -2128},
|
||||
{ 316, -316, 1053, -1053, 1895, -1895, 2947, -2947},
|
||||
{ 422, -422, 1405, -1405, 2529, -2529, 3934, -3934},
|
||||
{ 548, -548, 1828, -1828, 3290, -3290, 5117, -5117},
|
||||
{ 696, -696, 2320, -2320, 4176, -4176, 6496, -6496},
|
||||
{ 868, -868, 2893, -2893, 5207, -5207, 8099, -8099},
|
||||
{1064, -1064, 3548, -3548, 6386, -6386, 9933, -9933},
|
||||
{1286, -1286, 4288, -4288, 7718, -7718, 12005, -12005},
|
||||
{1536, -1536, 5120, -5120, 9216, -9216, 14336, -14336},
|
||||
};
|
||||
|
||||
|
||||
/* The Least Mean Squares Filter is the heart of QOA. It predicts the next
sample based on the previous 4 reconstructed samples. It does so by continuously
adjusting 4 weights based on the residual of the previous prediction.

The next sample is predicted as the sum of (weight[i] * history[i]).

The adjustment of the weights is done with a "Sign-Sign-LMS" that adds or
subtracts the residual to each weight, based on the corresponding sample from
the history. This, surprisingly, is sufficient to get worthwhile predictions.

This is all done with fixed point integers. Hence the right-shifts when updating
the weights and calculating the prediction. */

static int qoa_lms_predict(qoa_lms_t *lms) {
	int prediction = 0;
	for (int i = 0; i < QOA_LMS_LEN; i++) {
		prediction += lms->weights[i] * lms->history[i];
	}
	return prediction >> 13;
}

static void qoa_lms_update(qoa_lms_t *lms, int sample, int residual) {
	int delta = residual >> 4;
	for (int i = 0; i < QOA_LMS_LEN; i++) {
		lms->weights[i] += lms->history[i] < 0 ? -delta : delta;
	}

	for (int i = 0; i < QOA_LMS_LEN-1; i++) {
		lms->history[i] = lms->history[i+1];
	}
	lms->history[QOA_LMS_LEN-1] = sample;
}

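/* Worked example (illustrative, not part of QOA): with history {100, 200, 300,
   400} and all four weights set to 1 << 11 (i.e. 0.25 in the .13 fixed point
   used here), qoa_lms_predict() returns

       (100 + 200 + 300 + 400) * (1 << 11) >> 13  =  1000 >> 2  =  250

   i.e. the average of the history, as expected for equal 0.25 weights. A
   subsequent qoa_lms_update() then nudges each weight by +/-(residual >> 4)
   depending on the sign of the corresponding history sample, and shifts the
   new sample into the history. */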
/* qoa_div() implements a rounding division, but avoids rounding to zero for
small numbers. E.g. 0.1 will be rounded to 1. Note that 0 itself still
returns as 0, which is handled in the qoa_quant_tab[].
qoa_div() takes an index into the .16 fixed point qoa_reciprocal_tab as an
argument, so it can do the division with a cheaper integer multiplication. */

static inline int qoa_div(int v, int scalefactor) {
	int reciprocal = qoa_reciprocal_tab[scalefactor];
	int n = (v * reciprocal + (1 << 15)) >> 16;
	n = n + ((v > 0) - (v < 0)) - ((n > 0) - (n < 0)); /* round away from 0 */
	return n;
}

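/* Worked example (illustrative): dividing v = 100 by scalefactor index 3
   (scalefactor 45, qoa_reciprocal_tab[3] = 1457):

       n = (100 * 1457 + (1 << 15)) >> 16 = 178468 >> 16 = 2

   v > 0 and n > 0, so the round-away correction cancels out and the result is
   2 (100 / 45 = 2.22...). For v = 1 the first step yields n = 0 and the
   correction bumps it to 1, which is the "small values never round to zero"
   behaviour described above. */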
static inline int qoa_clamp(int v, int min, int max) {
	if (v < min) { return min; }
	if (v > max) { return max; }
	return v;
}

/* This specialized clamp function for the signed 16 bit range improves decode
performance quite a bit. The extra if() statement works nicely with the CPU's
branch prediction as this branch is rarely taken. */

static inline int qoa_clamp_s16(int v) {
	if ((unsigned int)(v + 32768) > 65535) {
		if (v < -32768) { return -32768; }
		if (v > 32767) { return 32767; }
	}
	return v;
}

static inline qoa_uint64_t qoa_read_u64(const unsigned char *bytes, unsigned int *p) {
	bytes += *p;
	*p += 8;
	return
		((qoa_uint64_t)(bytes[0]) << 56) | ((qoa_uint64_t)(bytes[1]) << 48) |
		((qoa_uint64_t)(bytes[2]) << 40) | ((qoa_uint64_t)(bytes[3]) << 32) |
		((qoa_uint64_t)(bytes[4]) << 24) | ((qoa_uint64_t)(bytes[5]) << 16) |
		((qoa_uint64_t)(bytes[6]) <<  8) | ((qoa_uint64_t)(bytes[7]) <<  0);
}

static inline void qoa_write_u64(qoa_uint64_t v, unsigned char *bytes, unsigned int *p) {
	bytes += *p;
	*p += 8;
	bytes[0] = (v >> 56) & 0xff;
	bytes[1] = (v >> 48) & 0xff;
	bytes[2] = (v >> 40) & 0xff;
	bytes[3] = (v >> 32) & 0xff;
	bytes[4] = (v >> 24) & 0xff;
	bytes[5] = (v >> 16) & 0xff;
	bytes[6] = (v >>  8) & 0xff;
	bytes[7] = (v >>  0) & 0xff;
}


/* -----------------------------------------------------------------------------
	Encoder */

unsigned int qoa_encode_header(qoa_desc *qoa, unsigned char *bytes) {
	unsigned int p = 0;
	qoa_write_u64(((qoa_uint64_t)QOA_MAGIC << 32) | qoa->samples, bytes, &p);
	return p;
}

unsigned int qoa_encode_frame(const short *sample_data, qoa_desc *qoa, unsigned int frame_len, unsigned char *bytes) {
|
||||
unsigned int channels = qoa->channels;
|
||||
|
||||
unsigned int p = 0;
|
||||
unsigned int slices = (frame_len + QOA_SLICE_LEN - 1) / QOA_SLICE_LEN;
|
||||
unsigned int frame_size = QOA_FRAME_SIZE(channels, slices);
|
||||
int prev_scalefactor[QOA_MAX_CHANNELS] = {0};
|
||||
|
||||
/* Write the frame header */
|
||||
qoa_write_u64((
|
||||
(qoa_uint64_t)qoa->channels << 56 |
|
||||
(qoa_uint64_t)qoa->samplerate << 32 |
|
||||
(qoa_uint64_t)frame_len << 16 |
|
||||
(qoa_uint64_t)frame_size
|
||||
), bytes, &p);
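/* Layout of the 64-bit frame header written above, derived from the shifts:
   bits 63..56 channels, 55..32 samplerate, 31..16 frame_len (samples per
   channel in this frame), 15..0 frame_size in bytes. qoa_decode_frame()
   below unpacks the same fields. */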
|
||||
|
||||
|
||||
for (unsigned int c = 0; c < channels; c++) {
|
||||
/* Write the current LMS state */
|
||||
qoa_uint64_t weights = 0;
|
||||
qoa_uint64_t history = 0;
|
||||
for (int i = 0; i < QOA_LMS_LEN; i++) {
|
||||
history = (history << 16) | (qoa->lms[c].history[i] & 0xffff);
|
||||
weights = (weights << 16) | (qoa->lms[c].weights[i] & 0xffff);
|
||||
}
|
||||
qoa_write_u64(history, bytes, &p);
|
||||
qoa_write_u64(weights, bytes, &p);
|
||||
}
|
||||
|
||||
/* We encode all samples with the channels interleaved on a slice level.
|
||||
E.g. for stereo: (ch-0, slice 0), (ch 1, slice 0), (ch 0, slice 1), ...*/
|
||||
for (unsigned int sample_index = 0; sample_index < frame_len; sample_index += QOA_SLICE_LEN) {
|
||||
|
||||
for (unsigned int c = 0; c < channels; c++) {
|
||||
int slice_len = qoa_clamp(QOA_SLICE_LEN, 0, frame_len - sample_index);
|
||||
int slice_start = sample_index * channels + c;
|
||||
int slice_end = (sample_index + slice_len) * channels + c;
|
||||
|
||||
/* Brute force search for the best scalefactor. Just go through all
16 scalefactors, encode all samples for the current slice and
measure the total squared error. */
|
||||
qoa_uint64_t best_rank = -1;
|
||||
#ifdef QOA_RECORD_TOTAL_ERROR
|
||||
qoa_uint64_t best_error = -1;
|
||||
#endif
|
||||
qoa_uint64_t best_slice = 0;
|
||||
qoa_lms_t best_lms;
|
||||
int best_scalefactor = 0;
|
||||
|
||||
for (int sfi = 0; sfi < 16; sfi++) {
|
||||
/* There is a strong correlation between the scalefactors of
|
||||
neighboring slices. As an optimization, start testing
|
||||
the best scalefactor of the previous slice first. */
|
||||
int scalefactor = (sfi + prev_scalefactor[c]) % 16;
|
||||
|
||||
/* We have to reset the LMS state to the last known good one
|
||||
before trying each scalefactor, as each pass updates the LMS
|
||||
state when encoding. */
|
||||
qoa_lms_t lms = qoa->lms[c];
|
||||
qoa_uint64_t slice = scalefactor;
|
||||
qoa_uint64_t current_rank = 0;
|
||||
#ifdef QOA_RECORD_TOTAL_ERROR
|
||||
qoa_uint64_t current_error = 0;
|
||||
#endif
|
||||
|
||||
for (int si = slice_start; si < slice_end; si += channels) {
|
||||
int sample = sample_data[si];
|
||||
int predicted = qoa_lms_predict(&lms);
|
||||
|
||||
int residual = sample - predicted;
|
||||
int scaled = qoa_div(residual, scalefactor);
|
||||
int clamped = qoa_clamp(scaled, -8, 8);
|
||||
int quantized = qoa_quant_tab[clamped + 8];
|
||||
int dequantized = qoa_dequant_tab[scalefactor][quantized];
|
||||
int reconstructed = qoa_clamp_s16(predicted + dequantized);
|
||||
|
||||
|
||||
/* If the weights have grown too large, we introduce a penalty
|
||||
here. This prevents pops/clicks in certain problem cases */
|
||||
int weights_penalty = ((
|
||||
lms.weights[0] * lms.weights[0] +
|
||||
lms.weights[1] * lms.weights[1] +
|
||||
lms.weights[2] * lms.weights[2] +
|
||||
lms.weights[3] * lms.weights[3]
|
||||
) >> 18) - 0x8ff;
|
||||
if (weights_penalty < 0) {
|
||||
weights_penalty = 0;
|
||||
}
|
||||
|
||||
long long error = (sample - reconstructed);
|
||||
qoa_uint64_t error_sq = error * error;
|
||||
|
||||
current_rank += error_sq + weights_penalty * weights_penalty;
|
||||
#ifdef QOA_RECORD_TOTAL_ERROR
|
||||
current_error += error_sq;
|
||||
#endif
|
||||
if (current_rank > best_rank) {
|
||||
break;
|
||||
}
|
||||
|
||||
qoa_lms_update(&lms, reconstructed, dequantized);
|
||||
slice = (slice << 3) | quantized;
|
||||
}
|
||||
|
||||
if (current_rank < best_rank) {
|
||||
best_rank = current_rank;
|
||||
#ifdef QOA_RECORD_TOTAL_ERROR
|
||||
best_error = current_error;
|
||||
#endif
|
||||
best_slice = slice;
|
||||
best_lms = lms;
|
||||
best_scalefactor = scalefactor;
|
||||
}
|
||||
}
|
||||
|
||||
prev_scalefactor[c] = best_scalefactor;
|
||||
|
||||
qoa->lms[c] = best_lms;
|
||||
#ifdef QOA_RECORD_TOTAL_ERROR
|
||||
qoa->error += best_error;
|
||||
#endif
|
||||
|
||||
/* If this slice was shorter than QOA_SLICE_LEN, we have to left-
|
||||
shift all encoded data, to ensure the rightmost bits are the empty
|
||||
ones. This should only happen in the last frame of a file as all
|
||||
slices are completely filled otherwise. */
|
||||
best_slice <<= (QOA_SLICE_LEN - slice_len) * 3;
|
||||
qoa_write_u64(best_slice, bytes, &p);
|
||||
}
|
||||
}
|
||||
|
||||
return p;
|
||||
}
|
||||
|
||||
void *qoa_encode(const short *sample_data, qoa_desc *qoa, unsigned int *out_len) {
|
||||
if (
|
||||
qoa->samples == 0 ||
|
||||
qoa->samplerate == 0 || qoa->samplerate > 0xffffff ||
|
||||
qoa->channels == 0 || qoa->channels > QOA_MAX_CHANNELS
|
||||
) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/* Calculate the encoded size and allocate */
|
||||
unsigned int num_frames = (qoa->samples + QOA_FRAME_LEN-1) / QOA_FRAME_LEN;
|
||||
unsigned int num_slices = (qoa->samples + QOA_SLICE_LEN-1) / QOA_SLICE_LEN;
|
||||
unsigned int encoded_size = 8 + /* 8 byte file header */
|
||||
num_frames * 8 + /* 8 byte frame headers */
|
||||
num_frames * QOA_LMS_LEN * 4 * qoa->channels + /* 4 * 4 bytes lms state per channel */
|
||||
num_slices * 8 * qoa->channels; /* 8 byte slices */
|
||||
|
||||
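/* Worked example (illustrative, assuming the usual constants defined earlier
   in this file: QOA_SLICE_LEN = 20 and QOA_FRAME_LEN = 20 * 256 = 5120): one
   second of 44100 Hz stereo gives
       num_frames = ceil(44100 / 5120) = 9
       num_slices = ceil(44100 / 20)   = 2205
       encoded_size = 8 + 9*8 + 9*4*4*2 + 2205*8*2 = 8 + 72 + 288 + 35280 = 35648 bytes */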
unsigned char *bytes = QOA_MALLOC(encoded_size);
|
||||
|
||||
for (unsigned int c = 0; c < qoa->channels; c++) {
|
||||
/* Set the initial LMS weights to {0, 0, -1, 2}. This helps with the
|
||||
prediction of the first few ms of a file. */
|
||||
qoa->lms[c].weights[0] = 0;
|
||||
qoa->lms[c].weights[1] = 0;
|
||||
qoa->lms[c].weights[2] = -(1<<13);
|
||||
qoa->lms[c].weights[3] = (1<<14);
|
||||
|
||||
/* Explicitly set the history samples to 0, as we might have some
|
||||
garbage in there. */
|
||||
for (int i = 0; i < QOA_LMS_LEN; i++) {
|
||||
qoa->lms[c].history[i] = 0;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/* Encode the header and go through all frames */
|
||||
unsigned int p = qoa_encode_header(qoa, bytes);
|
||||
#ifdef QOA_RECORD_TOTAL_ERROR
|
||||
qoa->error = 0;
|
||||
#endif
|
||||
|
||||
int frame_len = QOA_FRAME_LEN;
|
||||
for (unsigned int sample_index = 0; sample_index < qoa->samples; sample_index += frame_len) {
|
||||
frame_len = qoa_clamp(QOA_FRAME_LEN, 0, qoa->samples - sample_index);
|
||||
const short *frame_samples = sample_data + sample_index * qoa->channels;
|
||||
unsigned int frame_size = qoa_encode_frame(frame_samples, qoa, frame_len, bytes + p);
|
||||
p += frame_size;
|
||||
}
|
||||
|
||||
*out_len = p;
|
||||
return bytes;
|
||||
}
|
||||
|
||||
|
||||
|
||||
/* -----------------------------------------------------------------------------
|
||||
Decoder */
|
||||
|
||||
unsigned int qoa_max_frame_size(qoa_desc *qoa) {
|
||||
return QOA_FRAME_SIZE(qoa->channels, QOA_SLICES_PER_FRAME);
|
||||
}
|
||||
|
||||
unsigned int qoa_decode_header(const unsigned char *bytes, int size, qoa_desc *qoa) {
|
||||
unsigned int p = 0;
|
||||
if (size < QOA_MIN_FILESIZE) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
/* Read the file header, verify the magic number ('qoaf') and read the
|
||||
total number of samples. */
|
||||
qoa_uint64_t file_header = qoa_read_u64(bytes, &p);
|
||||
|
||||
if ((file_header >> 32) != QOA_MAGIC) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
qoa->samples = file_header & 0xffffffff;
|
||||
if (!qoa->samples) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Peek into the first frame header to get the number of channels and
|
||||
the samplerate. */
|
||||
qoa_uint64_t frame_header = qoa_read_u64(bytes, &p);
|
||||
qoa->channels = (frame_header >> 56) & 0x0000ff;
|
||||
qoa->samplerate = (frame_header >> 32) & 0xffffff;
|
||||
|
||||
if (qoa->channels == 0 || qoa->samples == 0 || qoa->samplerate == 0) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
return 8;
|
||||
}
|
||||
|
||||
unsigned int qoa_decode_frame(const unsigned char *bytes, unsigned int size, qoa_desc *qoa, short *sample_data, unsigned int *frame_len) {
|
||||
unsigned int p = 0;
|
||||
*frame_len = 0;
|
||||
|
||||
if (size < 8 + QOA_LMS_LEN * 4 * qoa->channels) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Read and verify the frame header */
|
||||
qoa_uint64_t frame_header = qoa_read_u64(bytes, &p);
|
||||
unsigned int channels = (frame_header >> 56) & 0x0000ff;
|
||||
unsigned int samplerate = (frame_header >> 32) & 0xffffff;
|
||||
unsigned int samples = (frame_header >> 16) & 0x00ffff;
|
||||
unsigned int frame_size = (frame_header ) & 0x00ffff;
|
||||
|
||||
unsigned int data_size = frame_size - 8 - QOA_LMS_LEN * 4 * channels;
|
||||
unsigned int num_slices = data_size / 8;
|
||||
unsigned int max_total_samples = num_slices * QOA_SLICE_LEN;
|
||||
|
||||
if (
|
||||
channels != qoa->channels ||
|
||||
samplerate != qoa->samplerate ||
|
||||
frame_size > size ||
|
||||
samples * channels > max_total_samples
|
||||
) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
/* Read the LMS state: 4 x 2 bytes history, 4 x 2 bytes weights per channel */
|
||||
for (unsigned int c = 0; c < channels; c++) {
|
||||
qoa_uint64_t history = qoa_read_u64(bytes, &p);
|
||||
qoa_uint64_t weights = qoa_read_u64(bytes, &p);
|
||||
for (int i = 0; i < QOA_LMS_LEN; i++) {
|
||||
qoa->lms[c].history[i] = ((signed short)(history >> 48));
|
||||
history <<= 16;
|
||||
qoa->lms[c].weights[i] = ((signed short)(weights >> 48));
|
||||
weights <<= 16;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/* Decode all slices for all channels in this frame */
|
||||
for (unsigned int sample_index = 0; sample_index < samples; sample_index += QOA_SLICE_LEN) {
|
||||
for (unsigned int c = 0; c < channels; c++) {
|
||||
qoa_uint64_t slice = qoa_read_u64(bytes, &p);
|
||||
|
||||
int scalefactor = (slice >> 60) & 0xf;
|
||||
slice <<= 4;
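/* A slice is 64 bits: the top 4 bits hold the scalefactor (read above),
   followed by up to 20 quantized residuals of 3 bits each, which the loop
   below shifts out from the most significant end. */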
|
||||
|
||||
int slice_start = sample_index * channels + c;
|
||||
int slice_end = qoa_clamp(sample_index + QOA_SLICE_LEN, 0, samples) * channels + c;
|
||||
|
||||
for (int si = slice_start; si < slice_end; si += channels) {
|
||||
int predicted = qoa_lms_predict(&qoa->lms[c]);
|
||||
int quantized = (slice >> 61) & 0x7;
|
||||
int dequantized = qoa_dequant_tab[scalefactor][quantized];
|
||||
int reconstructed = qoa_clamp_s16(predicted + dequantized);
|
||||
|
||||
sample_data[si] = reconstructed;
|
||||
slice <<= 3;
|
||||
|
||||
qoa_lms_update(&qoa->lms[c], reconstructed, dequantized);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
*frame_len = samples;
|
||||
return p;
|
||||
}
|
||||
|
||||
short *qoa_decode(const unsigned char *bytes, int size, qoa_desc *qoa) {
|
||||
unsigned int p = qoa_decode_header(bytes, size, qoa);
|
||||
if (!p) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/* Calculate the required size of the sample buffer and allocate */
|
||||
int total_samples = qoa->samples * qoa->channels;
|
||||
short *sample_data = QOA_MALLOC(total_samples * sizeof(short));
|
||||
|
||||
unsigned int sample_index = 0;
|
||||
unsigned int frame_len;
|
||||
unsigned int frame_size;
|
||||
|
||||
/* Decode all frames */
|
||||
do {
|
||||
short *sample_ptr = sample_data + sample_index * qoa->channels;
|
||||
frame_size = qoa_decode_frame(bytes + p, size - p, qoa, sample_ptr, &frame_len);
|
||||
|
||||
p += frame_size;
|
||||
sample_index += frame_len;
|
||||
} while (frame_size && sample_index < qoa->samples);
|
||||
|
||||
qoa->samples = sample_index;
|
||||
return sample_data;
|
||||
}
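/* Usage sketch (illustrative): decoding a QOA file that is already in memory.

       qoa_desc desc;
       short *samples = qoa_decode(bytes, size, &desc);
       if (samples) {
           // desc.channels, desc.samplerate and desc.samples are filled from
           // the headers; samples holds desc.samples * desc.channels
           // interleaved signed 16-bit values, allocated with QOA_MALLOC.
           QOA_FREE(samples);
       }
*/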
|
||||
|
||||
|
||||
|
||||
/* -----------------------------------------------------------------------------
|
||||
File read/write convenience functions */
|
||||
|
||||
#ifndef QOA_NO_STDIO
|
||||
#include <stdio.h>
|
||||
|
||||
int qoa_write(const char *filename, const short *sample_data, qoa_desc *qoa) {
|
||||
FILE *f = fopen(filename, "wb");
|
||||
unsigned int size;
|
||||
void *encoded;
|
||||
|
||||
if (!f) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
encoded = qoa_encode(sample_data, qoa, &size);
|
||||
if (!encoded) {
|
||||
fclose(f);
|
||||
return 0;
|
||||
}
|
||||
|
||||
fwrite(encoded, 1, size, f);
|
||||
fclose(f);
|
||||
|
||||
QOA_FREE(encoded);
|
||||
return size;
|
||||
}
|
||||
|
||||
void *qoa_read(const char *filename, qoa_desc *qoa) {
|
||||
FILE *f = fopen(filename, "rb");
|
||||
int size, bytes_read;
|
||||
void *data;
|
||||
short *sample_data;
|
||||
|
||||
if (!f) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
fseek(f, 0, SEEK_END);
|
||||
size = ftell(f);
|
||||
if (size <= 0) {
|
||||
fclose(f);
|
||||
return NULL;
|
||||
}
|
||||
fseek(f, 0, SEEK_SET);
|
||||
|
||||
data = QOA_MALLOC(size);
|
||||
if (!data) {
|
||||
fclose(f);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
bytes_read = fread(data, 1, size, f);
|
||||
fclose(f);
|
||||
|
||||
sample_data = qoa_decode(data, bytes_read, qoa);
|
||||
QOA_FREE(data);
|
||||
return sample_data;
|
||||
}
|
||||
|
||||
#endif /* QOA_NO_STDIO */
|
||||
#endif /* QOA_IMPLEMENTATION */
178
qoi.c
@@ -1,178 +0,0 @@
#define QOI_IMPLEMENTATION
|
||||
#include "qoi.h"
|
||||
#include "cell.h"
|
||||
#include "sdl.h"
|
||||
#include <SDL3/SDL.h>
|
||||
#include <string.h>
|
||||
|
||||
// Helper function to check for integer overflow in size calculations
|
||||
static int check_size_overflow(size_t a, size_t b, size_t c, size_t *result)
|
||||
{
|
||||
if (a > SIZE_MAX / b) return 1;
|
||||
size_t temp = a * b;
|
||||
if (temp > SIZE_MAX / c) return 1;
|
||||
*result = temp * c;
|
||||
return 0;
|
||||
}
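// Worked example (illustrative): on a platform with a 32-bit size_t
// (SIZE_MAX = 4294967295), width = height = 70000 at 4 bytes per pixel would
// need roughly 19.6 GB; the first guard fires because 70000 > SIZE_MAX / 70000
// (= 61356), so the caller can reject the image instead of overflowing the
// multiplication.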
|
||||
|
||||
// QOI compression/encoding
|
||||
JSValue js_qoi_encode(JSContext *js, JSValue this_val, int argc, JSValueConst *argv)
|
||||
{
|
||||
if (argc < 1)
|
||||
return JS_ThrowTypeError(js, "compress_qoi requires an object argument");
|
||||
|
||||
// Check if width/height properties exist
|
||||
JSValue width_val = JS_GetPropertyStr(js, argv[0], "width");
|
||||
JSValue height_val = JS_GetPropertyStr(js, argv[0], "height");
|
||||
|
||||
if (JS_IsNull(width_val) || JS_IsNull(height_val)) {
|
||||
JS_FreeValue(js, width_val);
|
||||
JS_FreeValue(js, height_val);
|
||||
return JS_ThrowTypeError(js, "compress_qoi requires width and height properties");
|
||||
}
|
||||
|
||||
int width, height;
|
||||
if (JS_ToInt32(js, &width, width_val) < 0 || JS_ToInt32(js, &height, height_val) < 0) {
|
||||
JS_FreeValue(js, width_val);
|
||||
JS_FreeValue(js, height_val);
|
||||
return JS_ThrowTypeError(js, "width and height must be numbers");
|
||||
}
|
||||
JS_FreeValue(js, width_val);
|
||||
JS_FreeValue(js, height_val);
|
||||
|
||||
if (width < 1 || height < 1)
|
||||
return JS_ThrowRangeError(js, "width and height must be at least 1");
|
||||
|
||||
// Get pixel format
|
||||
JSValue format_val = JS_GetPropertyStr(js, argv[0], "format");
|
||||
SDL_PixelFormat format = js2SDL_PixelFormat(js, format_val);
|
||||
JS_FreeValue(js, format_val);
|
||||
|
||||
if (format == SDL_PIXELFORMAT_UNKNOWN)
|
||||
return JS_ThrowTypeError(js, "Invalid or missing pixel format");
|
||||
|
||||
// Get pixels
|
||||
JSValue pixels_val = JS_GetPropertyStr(js, argv[0], "pixels");
|
||||
size_t pixel_len;
|
||||
void *pixel_data = js_get_blob_data(js, &pixel_len, pixels_val);
|
||||
JS_FreeValue(js, pixels_val);
|
||||
|
||||
if (pixel_data == (void*)-1)
|
||||
return JS_EXCEPTION;
|
||||
if (!pixel_data)
|
||||
return JS_ThrowTypeError(js, "blob has no data");
|
||||
|
||||
// Validate buffer size
|
||||
int bytes_per_pixel = SDL_BYTESPERPIXEL(format);
|
||||
size_t required_size;
|
||||
if (check_size_overflow(width, height, bytes_per_pixel, &required_size)) {
	// note: pixels_val was already freed above, so it must not be freed again here
	return JS_ThrowRangeError(js, "Image dimensions too large");
}
|
||||
|
||||
if (pixel_len < required_size)
|
||||
return JS_ThrowRangeError(js, "pixels buffer too small for %dx%d format (need %zu bytes, got %zu)",
|
||||
width, height, required_size, pixel_len);
|
||||
|
||||
// Get colorspace (optional, default to sRGB)
|
||||
int colorspace = 0; // QOI_SRGB
|
||||
if (argc > 1) {
|
||||
colorspace = JS_ToBool(js, argv[1]);
|
||||
}
|
||||
|
||||
// Determine number of channels based on format
|
||||
int channels = SDL_ISPIXELFORMAT_ALPHA(format) ? 4 : 3;
|
||||
|
||||
// Prepare QOI descriptor
|
||||
qoi_desc desc = {
|
||||
.width = width,
|
||||
.height = height,
|
||||
.channels = channels,
|
||||
.colorspace = colorspace
|
||||
};
|
||||
|
||||
// Encode to QOI
|
||||
int out_len;
|
||||
void *qoi_data = qoi_encode(pixel_data, &desc, &out_len);
|
||||
|
||||
if (!qoi_data)
|
||||
return JS_ThrowInternalError(js, "QOI encoding failed");
|
||||
|
||||
// Create result object
|
||||
JSValue result = JS_NewObject(js);
|
||||
JS_SetPropertyStr(js, result, "width", JS_NewInt32(js, width));
|
||||
JS_SetPropertyStr(js, result, "height", JS_NewInt32(js, height));
|
||||
JS_SetPropertyStr(js, result, "format", JS_NewString(js, "qoi"));
|
||||
JS_SetPropertyStr(js, result, "channels", JS_NewInt32(js, channels));
|
||||
JS_SetPropertyStr(js, result, "colorspace", JS_NewInt32(js, colorspace));
|
||||
|
||||
JSValue compressed_pixels = js_new_blob_stoned_copy(js, qoi_data, out_len);
|
||||
free(qoi_data); // Free the QOI buffer after copying to blob
|
||||
JS_SetPropertyStr(js, result, "pixels", compressed_pixels);
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
// QOI decompression/decoding
|
||||
JSValue js_qoi_decode(JSContext *js, JSValue this_val, int argc, JSValueConst *argv)
|
||||
{
|
||||
size_t len;
|
||||
void *raw = js_get_blob_data(js, &len, argv[0]);
|
||||
if (raw == (void*)-1) return JS_EXCEPTION;
|
||||
if (!raw) return JS_ThrowReferenceError(js, "could not get QOI data from array buffer");
|
||||
|
||||
qoi_desc desc;
|
||||
void *data = qoi_decode(raw, len, &desc, 0); // 0 means use channels from file
|
||||
|
||||
if (!data)
|
||||
return JS_NULL; // Return null if not valid QOI
|
||||
|
||||
// QOI always decodes to either RGB or RGBA based on the file's channel count
|
||||
int channels = desc.channels;
|
||||
int pitch = desc.width * channels;
|
||||
size_t pixels_size = pitch * desc.height;
|
||||
|
||||
// If it's RGB, convert to RGBA for consistency
|
||||
void *rgba_data = data;
|
||||
if (channels == 3) {
|
||||
rgba_data = malloc(desc.width * desc.height * 4);
|
||||
if (!rgba_data) {
|
||||
free(data);
|
||||
return JS_ThrowOutOfMemory(js);
|
||||
}
|
||||
|
||||
// Convert RGB to RGBA
|
||||
unsigned char *src = (unsigned char*)data;
|
||||
unsigned char *dst = (unsigned char*)rgba_data;
|
||||
for (int i = 0; i < desc.width * desc.height; i++) {
|
||||
dst[i*4] = src[i*3];
|
||||
dst[i*4+1] = src[i*3+1];
|
||||
dst[i*4+2] = src[i*3+2];
|
||||
dst[i*4+3] = 255;
|
||||
}
|
||||
free(data);
|
||||
pitch = desc.width * 4;
|
||||
pixels_size = pitch * desc.height;
|
||||
}
|
||||
|
||||
// Create JS object with surface data
|
||||
JSValue obj = JS_NewObject(js);
|
||||
JS_SetPropertyStr(js, obj, "width", JS_NewInt32(js, desc.width));
|
||||
JS_SetPropertyStr(js, obj, "height", JS_NewInt32(js, desc.height));
|
||||
JS_SetPropertyStr(js, obj, "format", JS_NewString(js, "rgba32"));
|
||||
JS_SetPropertyStr(js, obj, "pitch", JS_NewInt32(js, pitch));
|
||||
JS_SetPropertyStr(js, obj, "pixels", js_new_blob_stoned_copy(js, rgba_data, pixels_size));
|
||||
JS_SetPropertyStr(js, obj, "depth", JS_NewInt32(js, 8));
|
||||
JS_SetPropertyStr(js, obj, "hdr", JS_NewBool(js, 0));
|
||||
JS_SetPropertyStr(js, obj, "colorspace", JS_NewInt32(js, desc.colorspace));
|
||||
|
||||
free(rgba_data);
|
||||
return obj;
|
||||
}
|
||||
|
||||
static const JSCFunctionListEntry js_qoi_funcs[] = {
|
||||
MIST_FUNC_DEF(qoi, encode, 1),
|
||||
MIST_FUNC_DEF(qoi, decode, 1)
|
||||
};
|
||||
|
||||
CELL_USE_FUNCS(js_qoi_funcs)
649
qoi.h
@@ -1,649 +0,0 @@
/*
|
||||
|
||||
Copyright (c) 2021, Dominic Szablewski - https://phoboslab.org
|
||||
SPDX-License-Identifier: MIT
|
||||
|
||||
|
||||
QOI - The "Quite OK Image" format for fast, lossless image compression
|
||||
|
||||
-- About
|
||||
|
||||
QOI encodes and decodes images in a lossless format. Compared to stb_image and
|
||||
stb_image_write QOI offers 20x-50x faster encoding, 3x-4x faster decoding and
|
||||
20% better compression.
|
||||
|
||||
|
||||
-- Synopsis
|
||||
|
||||
// Define `QOI_IMPLEMENTATION` in *one* C/C++ file before including this
|
||||
// library to create the implementation.
|
||||
|
||||
#define QOI_IMPLEMENTATION
|
||||
#include "qoi.h"
|
||||
|
||||
// Encode and store an RGBA buffer to the file system. The qoi_desc describes
|
||||
// the input pixel data.
|
||||
qoi_write("image_new.qoi", rgba_pixels, &(qoi_desc){
|
||||
.width = 1920,
|
||||
.height = 1080,
|
||||
.channels = 4,
|
||||
.colorspace = QOI_SRGB
|
||||
});
|
||||
|
||||
// Load and decode a QOI image from the file system into a 32bbp RGBA buffer.
|
||||
// The qoi_desc struct will be filled with the width, height, number of channels
|
||||
// and colorspace read from the file header.
|
||||
qoi_desc desc;
|
||||
void *rgba_pixels = qoi_read("image.qoi", &desc, 4);
|
||||
|
||||
|
||||
|
||||
-- Documentation
|
||||
|
||||
This library provides the following functions;
|
||||
- qoi_read -- read and decode a QOI file
|
||||
- qoi_decode -- decode the raw bytes of a QOI image from memory
|
||||
- qoi_write -- encode and write a QOI file
|
||||
- qoi_encode -- encode an rgba buffer into a QOI image in memory
|
||||
|
||||
See the function declaration below for the signature and more information.
|
||||
|
||||
If you don't want/need the qoi_read and qoi_write functions, you can define
|
||||
QOI_NO_STDIO before including this library.
|
||||
|
||||
This library uses malloc() and free(). To supply your own malloc implementation
|
||||
you can define QOI_MALLOC and QOI_FREE before including this library.
|
||||
|
||||
This library uses memset() to zero-initialize the index. To supply your own
|
||||
implementation you can define QOI_ZEROARR before including this library.
|
||||
|
||||
|
||||
-- Data Format
|
||||
|
||||
A QOI file has a 14 byte header, followed by any number of data "chunks" and an
|
||||
8-byte end marker.
|
||||
|
||||
struct qoi_header_t {
|
||||
char magic[4]; // magic bytes "qoif"
|
||||
uint32_t width; // image width in pixels (BE)
|
||||
uint32_t height; // image height in pixels (BE)
|
||||
uint8_t channels; // 3 = RGB, 4 = RGBA
|
||||
uint8_t colorspace; // 0 = sRGB with linear alpha, 1 = all channels linear
|
||||
};
|
||||
|
||||
Images are encoded row by row, left to right, top to bottom. The decoder and
|
||||
encoder start with {r: 0, g: 0, b: 0, a: 255} as the previous pixel value. An
|
||||
image is complete when all pixels specified by width * height have been covered.
|
||||
|
||||
Pixels are encoded as
|
||||
- a run of the previous pixel
|
||||
- an index into an array of previously seen pixels
|
||||
- a difference to the previous pixel value in r,g,b
|
||||
- full r,g,b or r,g,b,a values
|
||||
|
||||
The color channels are assumed to not be premultiplied with the alpha channel
|
||||
("un-premultiplied alpha").
|
||||
|
||||
A running array[64] (zero-initialized) of previously seen pixel values is
|
||||
maintained by the encoder and decoder. Each pixel that is seen by the encoder
|
||||
and decoder is put into this array at the position formed by a hash function of
|
||||
the color value. In the encoder, if the pixel value at the index matches the
|
||||
current pixel, this index position is written to the stream as QOI_OP_INDEX.
|
||||
The hash function for the index is:
|
||||
|
||||
index_position = (r * 3 + g * 5 + b * 7 + a * 11) % 64
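
For example (illustrative): a pixel {r: 10, g: 20, b: 30, a: 255} hashes to
(10*3 + 20*5 + 30*7 + 255*11) % 64 = 3145 % 64 = 9, so it is stored at index 9
and can later be emitted as a 1-byte QOI_OP_INDEX chunk if it repeats.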
|
||||
|
||||
Each chunk starts with a 2- or 8-bit tag, followed by a number of data bits. The
|
||||
bit length of chunks is divisible by 8 - i.e. all chunks are byte aligned. All
|
||||
values encoded in these data bits have the most significant bit on the left.
|
||||
|
||||
The 8-bit tags have precedence over the 2-bit tags. A decoder must check for the
|
||||
presence of an 8-bit tag first.
|
||||
|
||||
The byte stream's end is marked with 7 0x00 bytes followed by a single 0x01 byte.
|
||||
|
||||
|
||||
The possible chunks are:
|
||||
|
||||
|
||||
.- QOI_OP_INDEX ----------.
|
||||
| Byte[0] |
|
||||
| 7 6 5 4 3 2 1 0 |
|
||||
|-------+-----------------|
|
||||
| 0 0 | index |
|
||||
`-------------------------`
|
||||
2-bit tag b00
|
||||
6-bit index into the color index array: 0..63
|
||||
|
||||
A valid encoder must not issue 2 or more consecutive QOI_OP_INDEX chunks to the
|
||||
same index. QOI_OP_RUN should be used instead.
|
||||
|
||||
|
||||
.- QOI_OP_DIFF -----------.
|
||||
| Byte[0] |
|
||||
| 7 6 5 4 3 2 1 0 |
|
||||
|-------+-----+-----+-----|
|
||||
| 0 1 | dr | dg | db |
|
||||
`-------------------------`
|
||||
2-bit tag b01
|
||||
2-bit red channel difference from the previous pixel between -2..1
|
||||
2-bit green channel difference from the previous pixel between -2..1
|
||||
2-bit blue channel difference from the previous pixel between -2..1
|
||||
|
||||
The differences to the current channel values use a wraparound operation,
|
||||
so "1 - 2" will result in 255, while "255 + 1" will result in 0.
|
||||
|
||||
Values are stored as unsigned integers with a bias of 2. E.g. -2 is stored as
|
||||
0 (b00). 1 is stored as 3 (b11).
|
||||
|
||||
The alpha value remains unchanged from the previous pixel.
|
||||
|
||||
|
||||
.- QOI_OP_LUMA -------------------------------------.
|
||||
| Byte[0] | Byte[1] |
|
||||
| 7 6 5 4 3 2 1 0 | 7 6 5 4 3 2 1 0 |
|
||||
|-------+-----------------+-------------+-----------|
|
||||
| 1 0 | green diff | dr - dg | db - dg |
|
||||
`---------------------------------------------------`
|
||||
2-bit tag b10
|
||||
6-bit green channel difference from the previous pixel -32..31
|
||||
4-bit red channel difference minus green channel difference -8..7
|
||||
4-bit blue channel difference minus green channel difference -8..7
|
||||
|
||||
The green channel is used to indicate the general direction of change and is
|
||||
encoded in 6 bits. The red and blue channels (dr and db) base their diffs off
|
||||
of the green channel difference and are encoded in 4 bits. I.e.:
|
||||
dr_dg = (cur_px.r - prev_px.r) - (cur_px.g - prev_px.g)
|
||||
db_dg = (cur_px.b - prev_px.b) - (cur_px.g - prev_px.g)
|
||||
|
||||
The differences to the current channel values use a wraparound operation,
|
||||
so "10 - 13" will result in 253, while "250 + 7" will result in 1.
|
||||
|
||||
Values are stored as unsigned integers with a bias of 32 for the green channel
|
||||
and a bias of 8 for the red and blue channel.
|
||||
|
||||
The alpha value remains unchanged from the previous pixel.
|
||||
|
||||
|
||||
.- QOI_OP_RUN ------------.
|
||||
| Byte[0] |
|
||||
| 7 6 5 4 3 2 1 0 |
|
||||
|-------+-----------------|
|
||||
| 1 1 | run |
|
||||
`-------------------------`
|
||||
2-bit tag b11
|
||||
6-bit run-length repeating the previous pixel: 1..62
|
||||
|
||||
The run-length is stored with a bias of -1. Note that the run-lengths 63 and 64
|
||||
(b111110 and b111111) are illegal as they are occupied by the QOI_OP_RGB and
|
||||
QOI_OP_RGBA tags.
|
||||
|
||||
|
||||
.- QOI_OP_RGB ------------------------------------------.
|
||||
| Byte[0] | Byte[1] | Byte[2] | Byte[3] |
|
||||
| 7 6 5 4 3 2 1 0 | 7 .. 0 | 7 .. 0 | 7 .. 0 |
|
||||
|-------------------------+---------+---------+---------|
|
||||
| 1 1 1 1 1 1 1 0 | red | green | blue |
|
||||
`-------------------------------------------------------`
|
||||
8-bit tag b11111110
|
||||
8-bit red channel value
|
||||
8-bit green channel value
|
||||
8-bit blue channel value
|
||||
|
||||
The alpha value remains unchanged from the previous pixel.
|
||||
|
||||
|
||||
.- QOI_OP_RGBA ---------------------------------------------------.
|
||||
| Byte[0] | Byte[1] | Byte[2] | Byte[3] | Byte[4] |
|
||||
| 7 6 5 4 3 2 1 0 | 7 .. 0 | 7 .. 0 | 7 .. 0 | 7 .. 0 |
|
||||
|-------------------------+---------+---------+---------+---------|
|
||||
| 1 1 1 1 1 1 1 1 | red | green | blue | alpha |
|
||||
`-----------------------------------------------------------------`
|
||||
8-bit tag b11111111
|
||||
8-bit red channel value
|
||||
8-bit green channel value
|
||||
8-bit blue channel value
|
||||
8-bit alpha channel value
|
||||
|
||||
*/
|
||||
|
||||
|
||||
/* -----------------------------------------------------------------------------
|
||||
Header - Public functions */
|
||||
|
||||
#ifndef QOI_H
|
||||
#define QOI_H
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
/* A pointer to a qoi_desc struct has to be supplied to all of qoi's functions.
|
||||
It describes either the input format (for qoi_write and qoi_encode), or is
|
||||
filled with the description read from the file header (for qoi_read and
|
||||
qoi_decode).
|
||||
|
||||
The colorspace in this qoi_desc is an enum where
|
||||
0 = sRGB, i.e. gamma scaled RGB channels and a linear alpha channel
|
||||
1 = all channels are linear
|
||||
You may use the constants QOI_SRGB or QOI_LINEAR. The colorspace is purely
|
||||
informative. It will be saved to the file header, but does not affect
|
||||
how chunks are en-/decoded. */
|
||||
|
||||
#define QOI_SRGB 0
|
||||
#define QOI_LINEAR 1
|
||||
|
||||
typedef struct {
|
||||
unsigned int width;
|
||||
unsigned int height;
|
||||
unsigned char channels;
|
||||
unsigned char colorspace;
|
||||
} qoi_desc;
|
||||
|
||||
#ifndef QOI_NO_STDIO
|
||||
|
||||
/* Encode raw RGB or RGBA pixels into a QOI image and write it to the file
|
||||
system. The qoi_desc struct must be filled with the image width, height,
|
||||
number of channels (3 = RGB, 4 = RGBA) and the colorspace.
|
||||
|
||||
The function returns 0 on failure (invalid parameters, or fopen or malloc
|
||||
failed) or the number of bytes written on success. */
|
||||
|
||||
int qoi_write(const char *filename, const void *data, const qoi_desc *desc);
|
||||
|
||||
|
||||
/* Read and decode a QOI image from the file system. If channels is 0, the
|
||||
number of channels from the file header is used. If channels is 3 or 4 the
|
||||
output format will be forced into this number of channels.
|
||||
|
||||
The function either returns NULL on failure (invalid data, or malloc or fopen
|
||||
failed) or a pointer to the decoded pixels. On success, the qoi_desc struct
|
||||
will be filled with the description from the file header.
|
||||
|
||||
The returned pixel data should be free()d after use. */
|
||||
|
||||
void *qoi_read(const char *filename, qoi_desc *desc, int channels);
|
||||
|
||||
#endif /* QOI_NO_STDIO */
|
||||
|
||||
|
||||
/* Encode raw RGB or RGBA pixels into a QOI image in memory.
|
||||
|
||||
The function either returns NULL on failure (invalid parameters or malloc
|
||||
failed) or a pointer to the encoded data on success. On success the out_len
|
||||
is set to the size in bytes of the encoded data.
|
||||
|
||||
The returned qoi data should be free()d after use. */
|
||||
|
||||
void *qoi_encode(const void *data, const qoi_desc *desc, int *out_len);
|
||||
|
||||
|
||||
/* Decode a QOI image from memory.
|
||||
|
||||
The function either returns NULL on failure (invalid parameters or malloc
|
||||
failed) or a pointer to the decoded pixels. On success, the qoi_desc struct
|
||||
is filled with the description from the file header.
|
||||
|
||||
The returned pixel data should be free()d after use. */
|
||||
|
||||
void *qoi_decode(const void *data, int size, qoi_desc *desc, int channels);
|
||||
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
#endif /* QOI_H */
|
||||
|
||||
|
||||
/* -----------------------------------------------------------------------------
|
||||
Implementation */
|
||||
|
||||
#ifdef QOI_IMPLEMENTATION
|
||||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
|
||||
#ifndef QOI_MALLOC
|
||||
#define QOI_MALLOC(sz) malloc(sz)
|
||||
#define QOI_FREE(p) free(p)
|
||||
#endif
|
||||
#ifndef QOI_ZEROARR
|
||||
#define QOI_ZEROARR(a) memset((a),0,sizeof(a))
|
||||
#endif
|
||||
|
||||
#define QOI_OP_INDEX 0x00 /* 00xxxxxx */
|
||||
#define QOI_OP_DIFF 0x40 /* 01xxxxxx */
|
||||
#define QOI_OP_LUMA 0x80 /* 10xxxxxx */
|
||||
#define QOI_OP_RUN 0xc0 /* 11xxxxxx */
|
||||
#define QOI_OP_RGB 0xfe /* 11111110 */
|
||||
#define QOI_OP_RGBA 0xff /* 11111111 */
|
||||
|
||||
#define QOI_MASK_2 0xc0 /* 11000000 */
|
||||
|
||||
#define QOI_COLOR_HASH(C) (C.rgba.r*3 + C.rgba.g*5 + C.rgba.b*7 + C.rgba.a*11)
|
||||
#define QOI_MAGIC \
|
||||
(((unsigned int)'q') << 24 | ((unsigned int)'o') << 16 | \
|
||||
((unsigned int)'i') << 8 | ((unsigned int)'f'))
|
||||
#define QOI_HEADER_SIZE 14
|
||||
|
||||
/* 2GB is the max file size that this implementation can safely handle. We guard
|
||||
against anything larger than that, assuming the worst case with 5 bytes per
|
||||
pixel, rounded down to a nice clean value. 400 million pixels ought to be
|
||||
enough for anybody. */
|
||||
#define QOI_PIXELS_MAX ((unsigned int)400000000)
|
||||
|
||||
typedef union {
|
||||
struct { unsigned char r, g, b, a; } rgba;
|
||||
unsigned int v;
|
||||
} qoi_rgba_t;
|
||||
|
||||
static const unsigned char qoi_padding[8] = {0,0,0,0,0,0,0,1};
|
||||
|
||||
static void qoi_write_32(unsigned char *bytes, int *p, unsigned int v) {
|
||||
bytes[(*p)++] = (0xff000000 & v) >> 24;
|
||||
bytes[(*p)++] = (0x00ff0000 & v) >> 16;
|
||||
bytes[(*p)++] = (0x0000ff00 & v) >> 8;
|
||||
bytes[(*p)++] = (0x000000ff & v);
|
||||
}
|
||||
|
||||
static unsigned int qoi_read_32(const unsigned char *bytes, int *p) {
|
||||
unsigned int a = bytes[(*p)++];
|
||||
unsigned int b = bytes[(*p)++];
|
||||
unsigned int c = bytes[(*p)++];
|
||||
unsigned int d = bytes[(*p)++];
|
||||
return a << 24 | b << 16 | c << 8 | d;
|
||||
}
|
||||
|
||||
void *qoi_encode(const void *data, const qoi_desc *desc, int *out_len) {
|
||||
int i, max_size, p, run;
|
||||
int px_len, px_end, px_pos, channels;
|
||||
unsigned char *bytes;
|
||||
const unsigned char *pixels;
|
||||
qoi_rgba_t index[64];
|
||||
qoi_rgba_t px, px_prev;
|
||||
|
||||
if (
|
||||
data == NULL || out_len == NULL || desc == NULL ||
|
||||
desc->width == 0 || desc->height == 0 ||
|
||||
desc->channels < 3 || desc->channels > 4 ||
|
||||
desc->colorspace > 1 ||
|
||||
desc->height >= QOI_PIXELS_MAX / desc->width
|
||||
) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
max_size =
|
||||
desc->width * desc->height * (desc->channels + 1) +
|
||||
QOI_HEADER_SIZE + sizeof(qoi_padding);
|
||||
|
||||
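/* Worked example (illustrative): a 1920x1080 RGBA image reserves
   1920 * 1080 * 5 + 14 + 8 = 10368022 bytes here, i.e. the worst case of one
   5-byte QOI_OP_RGBA chunk per pixel plus header and end marker. */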
p = 0;
|
||||
bytes = (unsigned char *) QOI_MALLOC(max_size);
|
||||
if (!bytes) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
qoi_write_32(bytes, &p, QOI_MAGIC);
|
||||
qoi_write_32(bytes, &p, desc->width);
|
||||
qoi_write_32(bytes, &p, desc->height);
|
||||
bytes[p++] = desc->channels;
|
||||
bytes[p++] = desc->colorspace;
|
||||
|
||||
|
||||
pixels = (const unsigned char *)data;
|
||||
|
||||
QOI_ZEROARR(index);
|
||||
|
||||
run = 0;
|
||||
px_prev.rgba.r = 0;
|
||||
px_prev.rgba.g = 0;
|
||||
px_prev.rgba.b = 0;
|
||||
px_prev.rgba.a = 255;
|
||||
px = px_prev;
|
||||
|
||||
px_len = desc->width * desc->height * desc->channels;
|
||||
px_end = px_len - desc->channels;
|
||||
channels = desc->channels;
|
||||
|
||||
for (px_pos = 0; px_pos < px_len; px_pos += channels) {
|
||||
px.rgba.r = pixels[px_pos + 0];
|
||||
px.rgba.g = pixels[px_pos + 1];
|
||||
px.rgba.b = pixels[px_pos + 2];
|
||||
|
||||
if (channels == 4) {
|
||||
px.rgba.a = pixels[px_pos + 3];
|
||||
}
|
||||
|
||||
if (px.v == px_prev.v) {
|
||||
run++;
|
||||
if (run == 62 || px_pos == px_end) {
|
||||
bytes[p++] = QOI_OP_RUN | (run - 1);
|
||||
run = 0;
|
||||
}
|
||||
}
|
||||
else {
|
||||
int index_pos;
|
||||
|
||||
if (run > 0) {
|
||||
bytes[p++] = QOI_OP_RUN | (run - 1);
|
||||
run = 0;
|
||||
}
|
||||
|
||||
index_pos = QOI_COLOR_HASH(px) & (64 - 1);
|
||||
|
||||
if (index[index_pos].v == px.v) {
|
||||
bytes[p++] = QOI_OP_INDEX | index_pos;
|
||||
}
|
||||
else {
|
||||
index[index_pos] = px;
|
||||
|
||||
if (px.rgba.a == px_prev.rgba.a) {
|
||||
signed char vr = px.rgba.r - px_prev.rgba.r;
|
||||
signed char vg = px.rgba.g - px_prev.rgba.g;
|
||||
signed char vb = px.rgba.b - px_prev.rgba.b;
|
||||
|
||||
signed char vg_r = vr - vg;
|
||||
signed char vg_b = vb - vg;
|
||||
|
||||
if (
|
||||
vr > -3 && vr < 2 &&
|
||||
vg > -3 && vg < 2 &&
|
||||
vb > -3 && vb < 2
|
||||
) {
|
||||
bytes[p++] = QOI_OP_DIFF | (vr + 2) << 4 | (vg + 2) << 2 | (vb + 2);
|
||||
}
|
||||
else if (
|
||||
vg_r > -9 && vg_r < 8 &&
|
||||
vg > -33 && vg < 32 &&
|
||||
vg_b > -9 && vg_b < 8
|
||||
) {
|
||||
bytes[p++] = QOI_OP_LUMA | (vg + 32);
|
||||
bytes[p++] = (vg_r + 8) << 4 | (vg_b + 8);
|
||||
}
|
||||
else {
|
||||
bytes[p++] = QOI_OP_RGB;
|
||||
bytes[p++] = px.rgba.r;
|
||||
bytes[p++] = px.rgba.g;
|
||||
bytes[p++] = px.rgba.b;
|
||||
}
|
||||
}
|
||||
else {
|
||||
bytes[p++] = QOI_OP_RGBA;
|
||||
bytes[p++] = px.rgba.r;
|
||||
bytes[p++] = px.rgba.g;
|
||||
bytes[p++] = px.rgba.b;
|
||||
bytes[p++] = px.rgba.a;
|
||||
}
|
||||
}
|
||||
}
|
||||
px_prev = px;
|
||||
}
|
||||
|
||||
for (i = 0; i < (int)sizeof(qoi_padding); i++) {
|
||||
bytes[p++] = qoi_padding[i];
|
||||
}
|
||||
|
||||
*out_len = p;
|
||||
return bytes;
|
||||
}
|
||||
|
||||
void *qoi_decode(const void *data, int size, qoi_desc *desc, int channels) {
|
||||
const unsigned char *bytes;
|
||||
unsigned int header_magic;
|
||||
unsigned char *pixels;
|
||||
qoi_rgba_t index[64];
|
||||
qoi_rgba_t px;
|
||||
int px_len, chunks_len, px_pos;
|
||||
int p = 0, run = 0;
|
||||
|
||||
if (
|
||||
data == NULL || desc == NULL ||
|
||||
(channels != 0 && channels != 3 && channels != 4) ||
|
||||
size < QOI_HEADER_SIZE + (int)sizeof(qoi_padding)
|
||||
) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
bytes = (const unsigned char *)data;
|
||||
|
||||
header_magic = qoi_read_32(bytes, &p);
|
||||
desc->width = qoi_read_32(bytes, &p);
|
||||
desc->height = qoi_read_32(bytes, &p);
|
||||
desc->channels = bytes[p++];
|
||||
desc->colorspace = bytes[p++];
|
||||
|
||||
if (
|
||||
desc->width == 0 || desc->height == 0 ||
|
||||
desc->channels < 3 || desc->channels > 4 ||
|
||||
desc->colorspace > 1 ||
|
||||
header_magic != QOI_MAGIC ||
|
||||
desc->height >= QOI_PIXELS_MAX / desc->width
|
||||
) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
if (channels == 0) {
|
||||
channels = desc->channels;
|
||||
}
|
||||
|
||||
px_len = desc->width * desc->height * channels;
|
||||
pixels = (unsigned char *) QOI_MALLOC(px_len);
|
||||
if (!pixels) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
QOI_ZEROARR(index);
|
||||
px.rgba.r = 0;
|
||||
px.rgba.g = 0;
|
||||
px.rgba.b = 0;
|
||||
px.rgba.a = 255;
|
||||
|
||||
chunks_len = size - (int)sizeof(qoi_padding);
|
||||
for (px_pos = 0; px_pos < px_len; px_pos += channels) {
|
||||
if (run > 0) {
|
||||
run--;
|
||||
}
|
||||
else if (p < chunks_len) {
|
||||
int b1 = bytes[p++];
|
||||
|
||||
if (b1 == QOI_OP_RGB) {
|
||||
px.rgba.r = bytes[p++];
|
||||
px.rgba.g = bytes[p++];
|
||||
px.rgba.b = bytes[p++];
|
||||
}
|
||||
else if (b1 == QOI_OP_RGBA) {
|
||||
px.rgba.r = bytes[p++];
|
||||
px.rgba.g = bytes[p++];
|
||||
px.rgba.b = bytes[p++];
|
||||
px.rgba.a = bytes[p++];
|
||||
}
|
||||
else if ((b1 & QOI_MASK_2) == QOI_OP_INDEX) {
|
||||
px = index[b1];
|
||||
}
|
||||
else if ((b1 & QOI_MASK_2) == QOI_OP_DIFF) {
|
||||
px.rgba.r += ((b1 >> 4) & 0x03) - 2;
|
||||
px.rgba.g += ((b1 >> 2) & 0x03) - 2;
|
||||
px.rgba.b += ( b1 & 0x03) - 2;
|
||||
}
|
||||
else if ((b1 & QOI_MASK_2) == QOI_OP_LUMA) {
|
||||
int b2 = bytes[p++];
|
||||
int vg = (b1 & 0x3f) - 32;
|
||||
px.rgba.r += vg - 8 + ((b2 >> 4) & 0x0f);
|
||||
px.rgba.g += vg;
|
||||
px.rgba.b += vg - 8 + (b2 & 0x0f);
|
||||
}
|
||||
else if ((b1 & QOI_MASK_2) == QOI_OP_RUN) {
|
||||
run = (b1 & 0x3f);
|
||||
}
|
||||
|
||||
index[QOI_COLOR_HASH(px) & (64 - 1)] = px;
|
||||
}
|
||||
|
||||
pixels[px_pos + 0] = px.rgba.r;
|
||||
pixels[px_pos + 1] = px.rgba.g;
|
||||
pixels[px_pos + 2] = px.rgba.b;
|
||||
|
||||
if (channels == 4) {
|
||||
pixels[px_pos + 3] = px.rgba.a;
|
||||
}
|
||||
}
|
||||
|
||||
return pixels;
|
||||
}
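/* Usage sketch (illustrative): decoding a QOI image that is already in memory
   and forcing 4-channel output, mirroring the synopsis at the top of this file:

       qoi_desc desc;
       unsigned char *rgba = qoi_decode(file_bytes, file_size, &desc, 4);
       if (rgba) {
           // desc.width x desc.height pixels, 4 bytes each
           QOI_FREE(rgba);
       }
*/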
|
||||
|
||||
#ifndef QOI_NO_STDIO
|
||||
#include <stdio.h>
|
||||
|
||||
int qoi_write(const char *filename, const void *data, const qoi_desc *desc) {
|
||||
FILE *f = fopen(filename, "wb");
|
||||
int size, err;
|
||||
void *encoded;
|
||||
|
||||
if (!f) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
encoded = qoi_encode(data, desc, &size);
|
||||
if (!encoded) {
|
||||
fclose(f);
|
||||
return 0;
|
||||
}
|
||||
|
||||
fwrite(encoded, 1, size, f);
|
||||
fflush(f);
|
||||
err = ferror(f);
|
||||
fclose(f);
|
||||
|
||||
QOI_FREE(encoded);
|
||||
return err ? 0 : size;
|
||||
}
|
||||
|
||||
void *qoi_read(const char *filename, qoi_desc *desc, int channels) {
|
||||
FILE *f = fopen(filename, "rb");
|
||||
int size, bytes_read;
|
||||
void *pixels, *data;
|
||||
|
||||
if (!f) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
fseek(f, 0, SEEK_END);
|
||||
size = ftell(f);
|
||||
if (size <= 0 || fseek(f, 0, SEEK_SET) != 0) {
|
||||
fclose(f);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
data = QOI_MALLOC(size);
|
||||
if (!data) {
|
||||
fclose(f);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
bytes_read = fread(data, 1, size, f);
|
||||
fclose(f);
|
||||
pixels = (bytes_read != size) ? NULL : qoi_decode(data, bytes_read, desc, channels);
|
||||
QOI_FREE(data);
|
||||
return pixels;
|
||||
}
|
||||
|
||||
#endif /* QOI_NO_STDIO */
|
||||
#endif /* QOI_IMPLEMENTATION */
|
||||
@@ -38,7 +38,7 @@ function isRecognizedExtension(ext) {
|
||||
|
||||
function find_in_path(filename, exts = []) {
|
||||
if (typeof filename != 'string') return null
|
||||
|
||||
|
||||
if (filename.includes('.')) {
|
||||
var candidate = filename // possibly need "/" ?
|
||||
if (io.exists(candidate) && !io.is_directory(candidate)) return candidate
|
||||
32
sdl.h
@@ -1,32 +0,0 @@
#ifndef QJS_SDL_H
|
||||
#define QJS_SDL_H
|
||||
|
||||
#include <SDL3/SDL.h>
|
||||
#include "cell.h"
|
||||
|
||||
SDL_Window *js2SDL_Window(JSContext *js, JSValue v);
|
||||
JSValue SDL_Window2js(JSContext *js, SDL_Window *w);
|
||||
|
||||
SDL_PixelFormat str2pixelformat(const char *str);
|
||||
SDL_PixelFormat js2pixelformat(JSContext *js, JSValue v);
|
||||
JSValue pixelformat2js(JSContext *js, SDL_PixelFormat format);
|
||||
const char *pixelformat2str(SDL_PixelFormat format);
|
||||
|
||||
// New enum system functions
|
||||
int js2SDL_PixelFormat(JSContext *js, JSValue v);
|
||||
JSValue SDL_PixelFormat2js(JSContext *js, int enumval);
|
||||
SDL_Colorspace str2colorspace(const char *str);
|
||||
SDL_Colorspace js2colorspace(JSContext *js, JSValue v);
|
||||
JSValue colorspace2js(JSContext *js, SDL_Colorspace colorspace);
|
||||
const char *colorspace2str(SDL_Colorspace colorspace);
|
||||
|
||||
// SDL Scale Mode functions
|
||||
SDL_ScaleMode js2SDL_ScaleMode(JSContext *js, JSValue v);
|
||||
JSValue SDL_ScaleMode2js(JSContext *js, SDL_ScaleMode mode);
|
||||
|
||||
// Surface type
|
||||
typedef struct SDL_Surface SDL_Surface;
|
||||
SDL_Surface *js2SDL_Surface(JSContext *js, JSValue v);
|
||||
JSValue SDL_Surface2js(JSContext *js, SDL_Surface *s);
|
||||
|
||||
#endif
418
sdl/audio.c
@@ -1,418 +0,0 @@
#include <SDL3/SDL.h>
|
||||
#include <SDL3/SDL_audio.h>
|
||||
#include "cell.h"
|
||||
|
||||
#define countof(x) (sizeof(x)/sizeof((x)[0]))
|
||||
|
||||
// Helper functions
|
||||
double js2number(JSContext *js, JSValue v);
|
||||
int js2bool(JSContext *js, JSValue v);
|
||||
|
||||
// Free functions for finalizers
|
||||
void SDL_AudioStream_free(JSRuntime *rt, SDL_AudioStream *stream) {
|
||||
SDL_DestroyAudioStream(stream);
|
||||
}
|
||||
|
||||
// Class definitions
|
||||
QJSCLASS(SDL_AudioStream,)
|
||||
|
||||
// Conversion functions
|
||||
SDL_AudioFormat js2SDL_AudioFormat(JSContext *js, JSValue v) {
|
||||
int fmt = js2number(js, v);
|
||||
return (SDL_AudioFormat)fmt;
|
||||
}
|
||||
|
||||
JSValue SDL_AudioFormat2js(JSContext *js, SDL_AudioFormat fmt) {
|
||||
return JS_NewInt32(js, (int)fmt);
|
||||
}
|
||||
|
||||
SDL_AudioDeviceID js2SDL_AudioDeviceID(JSContext *js, JSValue v) {
|
||||
return (SDL_AudioDeviceID)js2number(js, v);
|
||||
}
|
||||
|
||||
JSValue SDL_AudioDeviceID2js(JSContext *js, SDL_AudioDeviceID id) {
|
||||
return JS_NewInt32(js, (int)id);
|
||||
}
|
||||
|
||||
SDL_AudioSpec js2SDL_AudioSpec(JSContext *js, JSValue v) {
|
||||
SDL_AudioSpec spec = {0};
|
||||
JS_GETPROP(js, spec.format, v, format, SDL_AudioFormat)
|
||||
JS_GETPROP(js, spec.channels, v, channels, number)
|
||||
JS_GETPROP(js, spec.freq, v, freq, number)
|
||||
return spec;
|
||||
}
|
||||
|
||||
JSValue SDL_AudioSpec2js(JSContext *js, SDL_AudioSpec spec) {
|
||||
JSValue obj = JS_NewObject(js);
|
||||
JS_SetPropertyStr(js, obj, "format", SDL_AudioFormat2js(js, spec.format));
|
||||
JS_SetPropertyStr(js, obj, "channels", JS_NewInt32(js, spec.channels));
|
||||
JS_SetPropertyStr(js, obj, "freq", JS_NewInt32(js, spec.freq));
|
||||
return obj;
|
||||
}
|
||||
|
||||
// Enum mappings for audio formats (simplified)
|
||||
JSValue js_get_audio_drivers(JSContext *js, JSValue self, int argc, JSValue *argv) {
|
||||
int count = SDL_GetNumAudioDrivers();
|
||||
JSValue arr = JS_NewArray(js);
|
||||
for (int i = 0; i < count; i++) {
|
||||
const char *driver = SDL_GetAudioDriver(i);
|
||||
JS_SetPropertyUint32(js, arr, i, JS_NewString(js, driver));
|
||||
}
|
||||
return arr;
|
||||
}
|
||||
|
||||
JSValue js_get_current_audio_driver(JSContext *js, JSValue self, int argc, JSValue *argv) {
|
||||
const char *driver = SDL_GetCurrentAudioDriver();
|
||||
return driver ? JS_NewString(js, driver) : JS_NULL;
|
||||
}
|
||||
|
||||
JSValue js_get_audio_playback_devices(JSContext *js, JSValue self, int argc, JSValue *argv) {
|
||||
SDL_AudioDeviceID *devices = SDL_GetAudioPlaybackDevices(NULL);
|
||||
if (!devices) return JS_NULL;
|
||||
JSValue arr = JS_NewArray(js);
|
||||
for (int i = 0; devices[i]; i++) {
|
||||
JS_SetPropertyUint32(js, arr, i, SDL_AudioDeviceID2js(js, devices[i]));
|
||||
}
|
||||
SDL_free(devices);
|
||||
return arr;
|
||||
}
|
||||
|
||||
JSValue js_get_audio_recording_devices(JSContext *js, JSValue self, int argc, JSValue *argv) {
|
||||
SDL_AudioDeviceID *devices = SDL_GetAudioRecordingDevices(NULL);
|
||||
if (!devices) return JS_NULL;
|
||||
JSValue arr = JS_NewArray(js);
|
||||
for (int i = 0; devices[i]; i++) {
|
||||
JS_SetPropertyUint32(js, arr, i, SDL_AudioDeviceID2js(js, devices[i]));
|
||||
}
|
||||
SDL_free(devices);
|
||||
return arr;
|
||||
}
|
||||
|
||||
JSValue js_get_audio_device_name(JSContext *js, JSValue self, int argc, JSValue *argv) {
|
||||
SDL_AudioDeviceID devid = js2SDL_AudioDeviceID(js, argv[0]);
|
||||
const char *name = SDL_GetAudioDeviceName(devid);
|
||||
return name ? JS_NewString(js, name) : JS_NULL;
|
||||
}
|
||||
|
||||
JSValue js_is_audio_device_playback(JSContext *js, JSValue self, int argc, JSValue *argv) {
|
||||
SDL_AudioDeviceID devid = js2SDL_AudioDeviceID(js, argv[0]);
|
||||
return JS_NewBool(js, SDL_IsAudioDevicePlayback(devid));
|
||||
}
|
||||
|
||||
JSValue js_is_audio_device_physical(JSContext *js, JSValue self, int argc, JSValue *argv) {
|
||||
SDL_AudioDeviceID devid = js2SDL_AudioDeviceID(js, argv[0]);
|
||||
return JS_NewBool(js, SDL_IsAudioDevicePhysical(devid));
|
||||
}
|
||||
|
||||
JSValue js_get_audio_device_format(JSContext *js, JSValue self, int argc, JSValue *argv) {
|
||||
SDL_AudioDeviceID devid = js2SDL_AudioDeviceID(js, argv[0]);
|
||||
SDL_AudioSpec spec;
|
||||
if (!SDL_GetAudioDeviceFormat(devid, &spec, NULL)) {
|
||||
return JS_NULL;
|
||||
}
|
||||
return SDL_AudioSpec2js(js, spec);
|
||||
}
|
||||
|
||||
JSValue js_open_audio_device_stream(JSContext *js, JSValue self, int argc, JSValue *argv) {
|
||||
SDL_AudioDeviceID devid = js2SDL_AudioDeviceID(js, argv[0]);
|
||||
SDL_AudioSpec spec = {0};
|
||||
if (argc > 1) {
|
||||
spec = js2SDL_AudioSpec(js, argv[1]);
|
||||
}
|
||||
SDL_AudioStream *stream = SDL_OpenAudioDeviceStream(devid, &spec, NULL, NULL);
|
||||
if (!stream) {
|
||||
return JS_ThrowInternalError(js, "Failed to open audio device stream: %s", SDL_GetError());
|
||||
}
|
||||
return SDL_AudioStream2js(js, stream);
|
||||
}
|
||||
|
||||
JSValue js_create_audio_stream(JSContext *js, JSValue self, int argc, JSValue *argv) {
|
||||
SDL_AudioSpec src_spec = js2SDL_AudioSpec(js, argv[0]);
|
||||
SDL_AudioSpec dst_spec = js2SDL_AudioSpec(js, argv[1]);
|
||||
SDL_AudioStream *stream = SDL_CreateAudioStream(&src_spec, &dst_spec);
|
||||
if (!stream) {
|
||||
return JS_ThrowInternalError(js, "Failed to create audio stream: %s", SDL_GetError());
|
||||
}
|
||||
return SDL_AudioStream2js(js, stream);
|
||||
}
|
||||
|
||||
JSC_CCALL(audio_stream_put_data,
|
||||
SDL_AudioStream *stream = js2SDL_AudioStream(js, self);
|
||||
size_t len;
|
||||
void *data = js_get_blob_data(js, &len, argv[0]);
|
||||
if (data == (void*)-1)
|
||||
return JS_EXCEPTION;
|
||||
if (!data)
|
||||
return JS_ThrowReferenceError(js, "invalid audio stream data");
|
||||
if (!SDL_PutAudioStreamData(stream, data, len))
|
||||
return JS_ThrowInternalError(js, "Failed to put audio stream data: %s", SDL_GetError());
|
||||
)
|
||||
|
||||
JSC_CCALL(audio_stream_get_data,
|
||||
SDL_AudioStream *stream = js2SDL_AudioStream(js, self);
|
||||
int len = js2number(js, argv[0]);
|
||||
void *data = malloc(len);
|
||||
int got = SDL_GetAudioStreamData(stream, data, len);
|
||||
if (got < 0) {
|
||||
free(data);
|
||||
ret = JS_ThrowInternalError(js, "Failed to get audio stream data: %s", SDL_GetError());
|
||||
} else {
|
||||
ret = js_new_blob_stoned_copy(js, data, got);
|
||||
free(data);
|
||||
}
|
||||
)
|
||||
|
||||
JSC_CCALL(audio_stream_available,
|
||||
SDL_AudioStream *stream = js2SDL_AudioStream(js, self);
|
||||
ret = JS_NewInt32(js, SDL_GetAudioStreamAvailable(stream));
|
||||
)
|
||||
|
||||
JSC_CCALL(audio_stream_queued,
SDL_AudioStream *stream = js2SDL_AudioStream(js, self);
ret = JS_NewInt32(js, SDL_GetAudioStreamQueued(stream));
)

JSC_CCALL(audio_stream_flush,
SDL_AudioStream *stream = js2SDL_AudioStream(js, self);
SDL_FlushAudioStream(stream);
)

JSC_CCALL(audio_stream_clear,
SDL_AudioStream *stream = js2SDL_AudioStream(js, self);
SDL_ClearAudioStream(stream);
)

JSC_CCALL(audio_stream_bind,
SDL_AudioStream *stream = js2SDL_AudioStream(js, self);
SDL_AudioDeviceID devid = js2SDL_AudioDeviceID(js, argv[0]);
if (!SDL_BindAudioStream(devid, stream)) {
ret = JS_ThrowInternalError(js, "Failed to bind audio stream: %s", SDL_GetError());
}
)

JSC_CCALL(audio_stream_unbind,
SDL_AudioStream *stream = js2SDL_AudioStream(js, self);
SDL_UnbindAudioStream(stream);
)

JSC_CCALL(audio_stream_get_format,
SDL_AudioStream *stream = js2SDL_AudioStream(js, self);
SDL_AudioSpec src, dst;
if (!SDL_GetAudioStreamFormat(stream, &src, &dst)) {
ret = JS_NULL;
} else {
JSValue obj = JS_NewObject(js);
JS_SetPropertyStr(js, obj, "src", SDL_AudioSpec2js(js, src));
JS_SetPropertyStr(js, obj, "dst", SDL_AudioSpec2js(js, dst));
ret = obj;
}
)

JSC_CCALL(audio_stream_get_device,
SDL_AudioStream *stream = js2SDL_AudioStream(js, self);
SDL_AudioDeviceID devid = SDL_GetAudioStreamDevice(stream);
ret = SDL_AudioDeviceID2js(js, devid);
)

JSValue js_audio_stream_get_gain(JSContext *js, JSValue self) {
SDL_AudioStream *stream = js2SDL_AudioStream(js, self);
return JS_NewFloat64(js, SDL_GetAudioStreamGain(stream));
}

JSValue js_audio_stream_set_gain(JSContext *js, JSValue self, JSValue val) {
SDL_AudioStream *stream = js2SDL_AudioStream(js, self);
float gain = js2number(js, val);
if (!SDL_SetAudioStreamGain(stream, gain)) {
return JS_ThrowInternalError(js, "Failed to set audio stream gain: %s", SDL_GetError());
}
return JS_NULL;
}

JSValue js_audio_stream_get_frequency_ratio(JSContext *js, JSValue self) {
SDL_AudioStream *stream = js2SDL_AudioStream(js, self);
return JS_NewFloat64(js, SDL_GetAudioStreamFrequencyRatio(stream));
}

JSValue js_audio_stream_set_frequency_ratio(JSContext *js, JSValue self, JSValue val) {
SDL_AudioStream *stream = js2SDL_AudioStream(js, self);
float ratio = js2number(js, val);
if (!SDL_SetAudioStreamFrequencyRatio(stream, ratio)) {
return JS_ThrowInternalError(js, "Failed to set audio stream frequency ratio: %s", SDL_GetError());
}
return JS_NULL;
}

JSC_CCALL(audio_device_pause,
SDL_AudioDeviceID devid = js2SDL_AudioDeviceID(js, argv[0]);
SDL_PauseAudioDevice(devid);
)

JSC_CCALL(audio_device_resume,
SDL_AudioDeviceID devid = js2SDL_AudioDeviceID(js, argv[0]);
SDL_ResumeAudioDevice(devid);
)

JSC_CCALL(audio_device_paused,
SDL_AudioDeviceID devid = js2SDL_AudioDeviceID(js, argv[0]);
ret = JS_NewBool(js, SDL_AudioDevicePaused(devid));
)

JSC_CCALL(audio_stream_device_paused,
SDL_AudioStream *stream = js2SDL_AudioStream(js, self);
ret = JS_NewBool(js, SDL_AudioStreamDevicePaused(stream));
)

JSC_CCALL(audio_stream_pause_device,
SDL_AudioStream *stream = js2SDL_AudioStream(js, self);
SDL_PauseAudioStreamDevice(stream);
)

JSC_CCALL(audio_stream_resume_device,
SDL_AudioStream *stream = js2SDL_AudioStream(js, self);
SDL_ResumeAudioStreamDevice(stream);
)

JSC_CCALL(audio_device_close,
SDL_AudioDeviceID devid = js2SDL_AudioDeviceID(js, argv[0]);
SDL_CloseAudioDevice(devid);
)

// Helper to open a stream on the default playback or recording device
// open_stream("playback") or open_stream("recording")
JSValue js_open_stream(JSContext *js, JSValue self, int argc, JSValue *argv) {
const char *type = JS_ToCString(js, argv[0]);
if (!type) return JS_EXCEPTION;

SDL_AudioDeviceID devid;
if (strcmp(type, "playback") == 0) {
devid = SDL_AUDIO_DEVICE_DEFAULT_PLAYBACK;
} else if (strcmp(type, "recording") == 0) {
devid = SDL_AUDIO_DEVICE_DEFAULT_RECORDING;
} else {
JS_FreeCString(js, type);
return JS_ThrowTypeError(js, "open_stream: type must be 'playback' or 'recording'");
}
JS_FreeCString(js, type);

// Create stream with default spec (will be set by set_format)
SDL_AudioSpec spec = {0};
spec.format = SDL_AUDIO_F32;
spec.channels = 2;
spec.freq = 44100;

SDL_AudioStream *stream = SDL_OpenAudioDeviceStream(devid, &spec, NULL, NULL);
if (!stream) {
return JS_ThrowInternalError(js, "Failed to open audio stream: %s", SDL_GetError());
}
return SDL_AudioStream2js(js, stream);
}

JSValue js_load_wav(JSContext *js, JSValue self, int argc, JSValue *argv) {
const char *path = JS_ToCString(js, argv[0]);
SDL_AudioSpec spec;
Uint8 *data;
Uint32 len;
if (!SDL_LoadWAV(path, &spec, &data, &len)) {
JS_FreeCString(js, path);
return JS_ThrowInternalError(js, "Failed to load WAV: %s", SDL_GetError());
}
JS_FreeCString(js, path);

JSValue obj = JS_NewObject(js);
JS_SetPropertyStr(js, obj, "spec", SDL_AudioSpec2js(js, spec));
JS_SetPropertyStr(js, obj, "data", js_new_blob_stoned_copy(js, data, len));
SDL_free(data);
return obj;
}

JSC_CCALL(convert_audio_samples,
SDL_AudioSpec src_spec = js2SDL_AudioSpec(js, argv[0]);
SDL_AudioSpec dst_spec = js2SDL_AudioSpec(js, argv[1]);
size_t src_len;
void *src_data = js_get_blob_data(js, &src_len, argv[2]);
if (src_data == (void*)-1 || !src_data) {
ret = JS_EXCEPTION;
} else {
Uint8 *dst_data = NULL;
int dst_len = 0;
if (!SDL_ConvertAudioSamples(&src_spec, src_data, (int)src_len, &dst_spec, &dst_data, &dst_len)) {
ret = JS_ThrowInternalError(js, "Failed to convert audio samples: %s", SDL_GetError());
} else {
ret = js_new_blob_stoned_copy(js, dst_data, dst_len);
SDL_free(dst_data);
}
}
)

JSC_CCALL(mix_audio,
SDL_AudioFormat format = js2SDL_AudioFormat(js, argv[0]);
size_t dst_len, src_len;
void *dst = js_get_blob_data(js, &dst_len, argv[1]);
if (dst == (void*)-1 || !dst)
return JS_EXCEPTION;
void *src = js_get_blob_data(js, &src_len, argv[2]);
if (src == (void*)-1 || !src)
return JS_EXCEPTION;
if (dst_len == 0)
return JS_ThrowInternalError(js, "No destination audio data provided");
if (src_len == 0)
return JS_ThrowInternalError(js, "No source audio data provided");
if (dst_len != src_len)
return JS_ThrowInternalError(js, "Source and destination audio data must be the same length");
float volume = js2number(js, argv[3]);
SDL_MixAudio(dst, src, format, dst_len, volume);
)

// Function list for SDL_AudioStream
static const JSCFunctionListEntry js_SDL_AudioStream_funcs[] = {
JS_CFUNC_DEF("put", 1, js_audio_stream_put_data),
JS_CFUNC_DEF("get", 1, js_audio_stream_get_data),
JS_CFUNC_DEF("available", 0, js_audio_stream_available),
JS_CFUNC_DEF("queued", 0, js_audio_stream_queued),
JS_CFUNC_DEF("flush", 0, js_audio_stream_flush),
JS_CFUNC_DEF("clear", 0, js_audio_stream_clear),
JS_CFUNC_DEF("bind", 1, js_audio_stream_bind),
JS_CFUNC_DEF("unbind", 0, js_audio_stream_unbind),
JS_CFUNC_DEF("get_format", 0, js_audio_stream_get_format),
JS_CFUNC_DEF("get_device", 0, js_audio_stream_get_device),
JS_CGETSET_DEF("gain", js_audio_stream_get_gain, js_audio_stream_set_gain),
JS_CGETSET_DEF("frequency_ratio", js_audio_stream_get_frequency_ratio, js_audio_stream_set_frequency_ratio),
JS_CFUNC_DEF("pause_device", 0, js_audio_stream_pause_device),
JS_CFUNC_DEF("resume_device", 0, js_audio_stream_resume_device),
JS_CFUNC_DEF("device_paused", 0, js_audio_stream_device_paused),
};

// Main function list
static const JSCFunctionListEntry js_sdl_audio_funcs[] = {
JS_CFUNC_DEF("get_drivers", 0, js_get_audio_drivers),
JS_CFUNC_DEF("get_current_driver", 0, js_get_current_audio_driver),
JS_CFUNC_DEF("get_playback_devices", 0, js_get_audio_playback_devices),
JS_CFUNC_DEF("get_recording_devices", 0, js_get_audio_recording_devices),
JS_CFUNC_DEF("get_device_name", 1, js_get_audio_device_name),
JS_CFUNC_DEF("is_playback_device", 1, js_is_audio_device_playback),
JS_CFUNC_DEF("is_physical_device", 1, js_is_audio_device_physical),
JS_CFUNC_DEF("get_device_format", 1, js_get_audio_device_format),
JS_CFUNC_DEF("open_device_stream", 1, js_open_audio_device_stream),
JS_CFUNC_DEF("open_stream", 1, js_open_stream),
JS_CFUNC_DEF("create_stream", 2, js_create_audio_stream),
JS_CFUNC_DEF("pause_device", 1, js_audio_device_pause),
JS_CFUNC_DEF("resume_device", 1, js_audio_device_resume),
JS_CFUNC_DEF("device_paused", 1, js_audio_device_paused),
JS_CFUNC_DEF("close_device", 1, js_audio_device_close),
JS_CFUNC_DEF("load_wav", 1, js_load_wav),
JS_CFUNC_DEF("convert_samples", 3, js_convert_audio_samples),
JS_CFUNC_DEF("mix_audio", 4, js_mix_audio),
};

CELL_USE_INIT(
SDL_Init(SDL_INIT_AUDIO);
JS_NewClassID(&js_SDL_AudioStream_id);
JS_NewClass(JS_GetRuntime(js), js_SDL_AudioStream_id, &js_SDL_AudioStream_class);
JSValue proto = JS_NewObject(js);
JS_SetPropertyFunctionList(js, proto, js_SDL_AudioStream_funcs, countof(js_SDL_AudioStream_funcs));
JS_SetClassProto(js, js_SDL_AudioStream_id, proto);

JSValue export = JS_NewObject(js);
JS_SetPropertyFunctionList(js, export, js_sdl_audio_funcs, countof(js_sdl_audio_funcs));
return export;
)
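
For context, a minimal native sketch of the SDL3 call sequence these bindings wrap: open the default playback stream, load a WAV, convert it to the stream's format, queue it, and un-pause the device. The file name is a placeholder and error handling is omitted; this is illustrative only and not part of the module.

// Sketch only (assumes SDL_Init(SDL_INIT_AUDIO) has already run).
SDL_AudioSpec want = { .format = SDL_AUDIO_F32, .channels = 2, .freq = 44100 };
SDL_AudioStream *stream =
    SDL_OpenAudioDeviceStream(SDL_AUDIO_DEVICE_DEFAULT_PLAYBACK, &want, NULL, NULL);

SDL_AudioSpec wav_spec;
Uint8 *wav_data;
Uint32 wav_len;
SDL_LoadWAV("beep.wav", &wav_spec, &wav_data, &wav_len); // placeholder path

Uint8 *pcm = NULL;
int pcm_len = 0;
SDL_ConvertAudioSamples(&wav_spec, wav_data, (int)wav_len, &want, &pcm, &pcm_len);

SDL_PutAudioStreamData(stream, pcm, pcm_len);  // queue the converted PCM
SDL_ResumeAudioStreamDevice(stream);           // device streams start paused

SDL_free(pcm);
SDL_free(wav_data);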
781
sdl/input.c
@@ -1,781 +0,0 @@
#include "cell.h"
|
||||
#include "stb_ds.h"
|
||||
#include "wota.h"
|
||||
|
||||
#include <SDL3/SDL.h>
|
||||
|
||||
// Internal keymod function for input module
|
||||
static JSValue js_keymod(JSContext *js)
|
||||
{
|
||||
SDL_Keymod modstate = SDL_GetModState();
|
||||
JSValue ret = JS_NewObject(js);
|
||||
if (SDL_KMOD_CTRL & modstate)
|
||||
JS_SetPropertyStr(js,ret,"ctrl", JS_NewBool(js,1));
|
||||
if (SDL_KMOD_SHIFT & modstate)
|
||||
JS_SetPropertyStr(js,ret,"shift", JS_NewBool(js,1));
|
||||
if (SDL_KMOD_ALT & modstate)
|
||||
JS_SetPropertyStr(js,ret,"alt", JS_NewBool(js,1));
|
||||
if (SDL_KMOD_GUI & modstate)
|
||||
JS_SetPropertyStr(js,ret,"super", JS_NewBool(js,1));
|
||||
if (SDL_KMOD_NUM & modstate)
|
||||
JS_SetPropertyStr(js,ret,"numlock", JS_NewBool(js,1));
|
||||
if (SDL_KMOD_CAPS & modstate)
|
||||
JS_SetPropertyStr(js,ret,"caps", JS_NewBool(js,1));
|
||||
if (SDL_KMOD_SCROLL & modstate)
|
||||
JS_SetPropertyStr(js,ret,"scrolllock", JS_NewBool(js,1));
|
||||
if (SDL_KMOD_MODE & modstate)
|
||||
JS_SetPropertyStr(js,ret,"mode", JS_NewBool(js,1));
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
// INPUT FUNCTIONS
|
||||
JSC_CCALL(input_mouse_lock, SDL_CaptureMouse(JS_ToBool(js,argv[0])))
|
||||
|
||||
JSC_CCALL(input_mouse_show,
|
||||
if (JS_ToBool(js,argv[0]))
|
||||
SDL_ShowCursor();
|
||||
else
|
||||
SDL_HideCursor();
|
||||
)
|
||||
|
||||
JSC_CCALL(input_keyname,
|
||||
return JS_NewString(js, SDL_GetKeyName(js2number(js,argv[0])));
|
||||
)
|
||||
|
||||
JSC_CCALL(input_keymod,
|
||||
return js_keymod(js);
|
||||
)
|
||||
|
||||
JSC_CCALL(input_mousestate,
|
||||
float x,y;
|
||||
SDL_MouseButtonFlags flags = SDL_GetMouseState(&x,&y);
|
||||
JSValue m = JS_NewObject(js);
|
||||
JS_SetPropertyStr(js,m,"x", number2js(js,x));
|
||||
JS_SetPropertyStr(js,m,"y", number2js(js,y));
|
||||
|
||||
if (flags & SDL_BUTTON_LMASK)
|
||||
JS_SetPropertyStr(js, m, "left", JS_NewBool(js, 1));
|
||||
if (flags & SDL_BUTTON_MMASK)
|
||||
JS_SetPropertyStr(js, m, "middle", JS_NewBool(js, 1));
|
||||
if (flags & SDL_BUTTON_RMASK)
|
||||
JS_SetPropertyStr(js, m, "right", JS_NewBool(js, 1));
|
||||
if (flags & SDL_BUTTON_X1MASK)
|
||||
JS_SetPropertyStr(js, m, "x1", JS_NewBool(js, 1));
|
||||
if (flags & SDL_BUTTON_X2MASK)
|
||||
JS_SetPropertyStr(js, m, "x2", JS_NewBool(js, 1));
|
||||
|
||||
return m;
|
||||
)
|
||||
|
||||
// Event processing functions (moved from cell.c)
|
||||
|
||||
const char* event_type_to_string(Uint32 event_type) {
|
||||
switch (event_type) {
|
||||
// Application events
|
||||
case SDL_EVENT_QUIT: return "quit";
|
||||
case SDL_EVENT_TERMINATING: return "terminating";
|
||||
case SDL_EVENT_LOW_MEMORY: return "low_memory";
|
||||
case SDL_EVENT_WILL_ENTER_BACKGROUND: return "will_enter_background";
|
||||
case SDL_EVENT_DID_ENTER_BACKGROUND: return "did_enter_background";
|
||||
case SDL_EVENT_WILL_ENTER_FOREGROUND: return "will_enter_foreground";
|
||||
case SDL_EVENT_DID_ENTER_FOREGROUND: return "did_enter_foreground";
|
||||
case SDL_EVENT_LOCALE_CHANGED: return "locale_changed";
|
||||
case SDL_EVENT_SYSTEM_THEME_CHANGED: return "system_theme_changed";
|
||||
|
||||
// Display events
|
||||
case SDL_EVENT_DISPLAY_ORIENTATION: return "display_orientation";
|
||||
case SDL_EVENT_DISPLAY_ADDED: return "display_added";
|
||||
case SDL_EVENT_DISPLAY_REMOVED: return "display_removed";
|
||||
case SDL_EVENT_DISPLAY_MOVED: return "display_moved";
|
||||
case SDL_EVENT_DISPLAY_DESKTOP_MODE_CHANGED: return "display_desktop_mode_changed";
|
||||
case SDL_EVENT_DISPLAY_CURRENT_MODE_CHANGED: return "display_current_mode_changed";
|
||||
case SDL_EVENT_DISPLAY_CONTENT_SCALE_CHANGED: return "display_content_scale_changed";
|
||||
|
||||
// Window events
|
||||
case SDL_EVENT_WINDOW_SHOWN: return "window_shown";
|
||||
case SDL_EVENT_WINDOW_HIDDEN: return "window_hidden";
|
||||
case SDL_EVENT_WINDOW_EXPOSED: return "window_exposed";
|
||||
case SDL_EVENT_WINDOW_MOVED: return "window_moved";
|
||||
case SDL_EVENT_WINDOW_RESIZED: return "window_resized";
|
||||
case SDL_EVENT_WINDOW_PIXEL_SIZE_CHANGED: return "window_pixel_size_changed";
|
||||
case SDL_EVENT_WINDOW_METAL_VIEW_RESIZED: return "window_metal_view_resized";
|
||||
case SDL_EVENT_WINDOW_MINIMIZED: return "window_minimized";
|
||||
case SDL_EVENT_WINDOW_MAXIMIZED: return "window_maximized";
|
||||
case SDL_EVENT_WINDOW_RESTORED: return "window_restored";
|
||||
case SDL_EVENT_WINDOW_MOUSE_ENTER: return "window_mouse_enter";
|
||||
case SDL_EVENT_WINDOW_MOUSE_LEAVE: return "window_mouse_leave";
|
||||
case SDL_EVENT_WINDOW_FOCUS_GAINED: return "window_focus_gained";
|
||||
case SDL_EVENT_WINDOW_FOCUS_LOST: return "window_focus_lost";
|
||||
case SDL_EVENT_WINDOW_CLOSE_REQUESTED: return "window_close_requested";
|
||||
case SDL_EVENT_WINDOW_HIT_TEST: return "window_hit_test";
|
||||
case SDL_EVENT_WINDOW_ICCPROF_CHANGED: return "window_iccprof_changed";
|
||||
case SDL_EVENT_WINDOW_DISPLAY_CHANGED: return "window_display_changed";
|
||||
case SDL_EVENT_WINDOW_DISPLAY_SCALE_CHANGED: return "window_display_scale_changed";
|
||||
case SDL_EVENT_WINDOW_SAFE_AREA_CHANGED: return "window_safe_area_changed";
|
||||
case SDL_EVENT_WINDOW_OCCLUDED: return "window_occluded";
|
||||
case SDL_EVENT_WINDOW_ENTER_FULLSCREEN: return "window_enter_fullscreen";
|
||||
case SDL_EVENT_WINDOW_LEAVE_FULLSCREEN: return "window_leave_fullscreen";
|
||||
case SDL_EVENT_WINDOW_DESTROYED: return "window_destroyed";
|
||||
case SDL_EVENT_WINDOW_HDR_STATE_CHANGED: return "window_hdr_state_changed";
|
||||
|
||||
// Keyboard events
|
||||
case SDL_EVENT_KEY_DOWN: return "key_down";
|
||||
case SDL_EVENT_KEY_UP: return "key_up";
|
||||
case SDL_EVENT_TEXT_EDITING: return "text_editing";
|
||||
case SDL_EVENT_TEXT_INPUT: return "text_input";
|
||||
case SDL_EVENT_KEYMAP_CHANGED: return "keymap_changed";
|
||||
case SDL_EVENT_KEYBOARD_ADDED: return "keyboard_added";
|
||||
case SDL_EVENT_KEYBOARD_REMOVED: return "keyboard_removed";
|
||||
case SDL_EVENT_TEXT_EDITING_CANDIDATES: return "text_editing_candidates";
|
||||
|
||||
// Mouse events
|
||||
case SDL_EVENT_MOUSE_MOTION: return "mouse_motion";
|
||||
case SDL_EVENT_MOUSE_BUTTON_DOWN: return "mouse_button_down";
|
||||
case SDL_EVENT_MOUSE_BUTTON_UP: return "mouse_button_up";
|
||||
case SDL_EVENT_MOUSE_WHEEL: return "mouse_wheel";
|
||||
case SDL_EVENT_MOUSE_ADDED: return "mouse_added";
|
||||
case SDL_EVENT_MOUSE_REMOVED: return "mouse_removed";
|
||||
|
||||
// Joystick events
|
||||
case SDL_EVENT_JOYSTICK_AXIS_MOTION: return "joystick_axis_motion";
|
||||
case SDL_EVENT_JOYSTICK_BALL_MOTION: return "joystick_ball_motion";
|
||||
case SDL_EVENT_JOYSTICK_HAT_MOTION: return "joystick_hat_motion";
|
||||
case SDL_EVENT_JOYSTICK_BUTTON_DOWN: return "joystick_button_down";
|
||||
case SDL_EVENT_JOYSTICK_BUTTON_UP: return "joystick_button_up";
|
||||
case SDL_EVENT_JOYSTICK_ADDED: return "joystick_added";
|
||||
case SDL_EVENT_JOYSTICK_REMOVED: return "joystick_removed";
|
||||
case SDL_EVENT_JOYSTICK_BATTERY_UPDATED: return "joystick_battery_updated";
|
||||
case SDL_EVENT_JOYSTICK_UPDATE_COMPLETE: return "joystick_update_complete";
|
||||
|
||||
// Gamepad events
|
||||
case SDL_EVENT_GAMEPAD_AXIS_MOTION: return "gamepad_axis_motion";
|
||||
case SDL_EVENT_GAMEPAD_BUTTON_DOWN: return "gamepad_button_down";
|
||||
case SDL_EVENT_GAMEPAD_BUTTON_UP: return "gamepad_button_up";
|
||||
case SDL_EVENT_GAMEPAD_ADDED: return "gamepad_added";
|
||||
case SDL_EVENT_GAMEPAD_REMOVED: return "gamepad_removed";
|
||||
case SDL_EVENT_GAMEPAD_REMAPPED: return "gamepad_remapped";
|
||||
case SDL_EVENT_GAMEPAD_TOUCHPAD_DOWN: return "gamepad_touchpad_down";
|
||||
case SDL_EVENT_GAMEPAD_TOUCHPAD_MOTION: return "gamepad_touchpad_motion";
|
||||
case SDL_EVENT_GAMEPAD_TOUCHPAD_UP: return "gamepad_touchpad_up";
|
||||
case SDL_EVENT_GAMEPAD_SENSOR_UPDATE: return "gamepad_sensor_update";
|
||||
case SDL_EVENT_GAMEPAD_UPDATE_COMPLETE: return "gamepad_update_complete";
|
||||
case SDL_EVENT_GAMEPAD_STEAM_HANDLE_UPDATED: return "gamepad_steam_handle_updated";
|
||||
|
||||
// Touch events
|
||||
case SDL_EVENT_FINGER_DOWN: return "finger_down";
|
||||
case SDL_EVENT_FINGER_UP: return "finger_up";
|
||||
case SDL_EVENT_FINGER_MOTION: return "finger_motion";
|
||||
|
||||
// Clipboard events
|
||||
case SDL_EVENT_CLIPBOARD_UPDATE: return "clipboard_update";
|
||||
|
||||
// Drag and drop events
|
||||
case SDL_EVENT_DROP_FILE: return "drop_file";
|
||||
case SDL_EVENT_DROP_TEXT: return "drop_text";
|
||||
case SDL_EVENT_DROP_BEGIN: return "drop_begin";
|
||||
case SDL_EVENT_DROP_COMPLETE: return "drop_complete";
|
||||
case SDL_EVENT_DROP_POSITION: return "drop_position";
|
||||
|
||||
// Audio device events
|
||||
case SDL_EVENT_AUDIO_DEVICE_ADDED: return "audio_device_added";
|
||||
case SDL_EVENT_AUDIO_DEVICE_REMOVED: return "audio_device_removed";
|
||||
case SDL_EVENT_AUDIO_DEVICE_FORMAT_CHANGED: return "audio_device_format_changed";
|
||||
|
||||
// Sensor events
|
||||
case SDL_EVENT_SENSOR_UPDATE: return "sensor_update";
|
||||
|
||||
// Pen events
|
||||
case SDL_EVENT_PEN_PROXIMITY_IN: return "pen_proximity_in";
|
||||
case SDL_EVENT_PEN_PROXIMITY_OUT: return "pen_proximity_out";
|
||||
case SDL_EVENT_PEN_DOWN: return "pen_down";
|
||||
case SDL_EVENT_PEN_UP: return "pen_up";
|
||||
case SDL_EVENT_PEN_BUTTON_DOWN: return "pen_button_down";
|
||||
case SDL_EVENT_PEN_BUTTON_UP: return "pen_button_up";
|
||||
case SDL_EVENT_PEN_MOTION: return "pen_motion";
|
||||
case SDL_EVENT_PEN_AXIS: return "pen_axis";
|
||||
|
||||
// Camera events
|
||||
case SDL_EVENT_CAMERA_DEVICE_ADDED: return "camera_device_added";
|
||||
case SDL_EVENT_CAMERA_DEVICE_REMOVED: return "camera_device_removed";
|
||||
case SDL_EVENT_CAMERA_DEVICE_APPROVED: return "camera_device_approved";
|
||||
case SDL_EVENT_CAMERA_DEVICE_DENIED: return "camera_device_denied";
|
||||
|
||||
// Render events
|
||||
case SDL_EVENT_RENDER_TARGETS_RESET: return "render_targets_reset";
|
||||
case SDL_EVENT_RENDER_DEVICE_RESET: return "render_device_reset";
|
||||
case SDL_EVENT_RENDER_DEVICE_LOST: return "render_device_lost";
|
||||
|
||||
// User event (assuming it should be included)
|
||||
case SDL_EVENT_USER: return "user";
|
||||
|
||||
default: return "unknown";
|
||||
}
|
||||
}
|
||||
|
||||
const char* mouse_button_to_string(int mouse) {
|
||||
switch (mouse) {
|
||||
case SDL_BUTTON_LEFT: return "left";
|
||||
case SDL_BUTTON_MIDDLE: return "middle";
|
||||
case SDL_BUTTON_RIGHT: return "right";
|
||||
case SDL_BUTTON_X1: return "x1";
|
||||
case SDL_BUTTON_X2: return "x2";
|
||||
default: return "left";
|
||||
}
|
||||
}
|
||||
|
||||
static void wota_write_vec2(WotaBuffer *wb, double x, double y) {
|
||||
// We'll store as WOTA_ARR of length 2, then two numbers
|
||||
wota_write_array(wb, 2);
|
||||
wota_write_number(wb, x);
|
||||
wota_write_number(wb, y);
|
||||
}
|
||||
|
||||
static int event2wota_count_props(const SDL_Event *event)
|
||||
{
|
||||
// We always store at least "type" and "timestamp".
|
||||
int count = 2;
|
||||
|
||||
switch (event->type) {
|
||||
|
||||
case SDL_EVENT_AUDIO_DEVICE_ADDED:
|
||||
case SDL_EVENT_AUDIO_DEVICE_REMOVED:
|
||||
count += 2; // which, recording
|
||||
break;
|
||||
|
||||
case SDL_EVENT_DISPLAY_ORIENTATION:
|
||||
case SDL_EVENT_DISPLAY_ADDED:
|
||||
case SDL_EVENT_DISPLAY_REMOVED:
|
||||
case SDL_EVENT_DISPLAY_MOVED:
|
||||
case SDL_EVENT_DISPLAY_DESKTOP_MODE_CHANGED:
|
||||
case SDL_EVENT_DISPLAY_CURRENT_MODE_CHANGED:
|
||||
case SDL_EVENT_DISPLAY_CONTENT_SCALE_CHANGED:
|
||||
count += 3; // which, orientation/data1, data2
|
||||
break;
|
||||
|
||||
case SDL_EVENT_MOUSE_MOTION:
|
||||
count += 5;
|
||||
break;
|
||||
|
||||
case SDL_EVENT_MOUSE_WHEEL:
|
||||
// window, which, scroll, mouse => 4 extra
|
||||
count += 4;
|
||||
break;
|
||||
|
||||
case SDL_EVENT_MOUSE_BUTTON_UP:
|
||||
case SDL_EVENT_MOUSE_BUTTON_DOWN:
|
||||
// window, which, down, button, clicks, mouse => 6 extra
|
||||
count += 6;
|
||||
break;
|
||||
|
||||
case SDL_EVENT_SENSOR_UPDATE:
|
||||
// which, sensor_timestamp => 2 extra
|
||||
count += 2;
|
||||
break;
|
||||
|
||||
case SDL_EVENT_KEY_DOWN:
|
||||
case SDL_EVENT_KEY_UP:
|
||||
// window, which, down, repeat, key, scancode, mod => 7 extra
|
||||
count += 7;
|
||||
break;
|
||||
|
||||
case SDL_EVENT_FINGER_MOTION:
|
||||
case SDL_EVENT_FINGER_DOWN:
|
||||
case SDL_EVENT_FINGER_UP:
|
||||
// touch, finger, pos, d_pos, pressure, window => 6 extra
|
||||
count += 6;
|
||||
break;
|
||||
|
||||
case SDL_EVENT_DROP_BEGIN:
|
||||
case SDL_EVENT_DROP_FILE:
|
||||
case SDL_EVENT_DROP_TEXT:
|
||||
case SDL_EVENT_DROP_COMPLETE:
|
||||
case SDL_EVENT_DROP_POSITION:
|
||||
// window, pos, data, source => 4 extra
|
||||
count += 4;
|
||||
break;
|
||||
|
||||
case SDL_EVENT_TEXT_INPUT:
|
||||
// window, text, mod => 3 extra
|
||||
count += 3;
|
||||
break;
|
||||
|
||||
case SDL_EVENT_CAMERA_DEVICE_APPROVED:
|
||||
case SDL_EVENT_CAMERA_DEVICE_REMOVED:
|
||||
case SDL_EVENT_CAMERA_DEVICE_ADDED:
|
||||
case SDL_EVENT_CAMERA_DEVICE_DENIED:
|
||||
// which => 1 extra
|
||||
count += 1;
|
||||
break;
|
||||
|
||||
case SDL_EVENT_CLIPBOARD_UPDATE:
|
||||
// owner => 1 extra
|
||||
count += 1;
|
||||
break;
|
||||
|
||||
/* Window events that only need 'which' */
|
||||
case SDL_EVENT_WINDOW_EXPOSED:
|
||||
case SDL_EVENT_WINDOW_FOCUS_GAINED:
|
||||
case SDL_EVENT_WINDOW_FOCUS_LOST:
|
||||
case SDL_EVENT_WINDOW_CLOSE_REQUESTED:
|
||||
// which => 1 extra
|
||||
count += 1;
|
||||
break;
|
||||
|
||||
/* Window events that need data1 and data2 */
|
||||
case SDL_EVENT_WINDOW_SHOWN:
|
||||
case SDL_EVENT_WINDOW_HIDDEN:
|
||||
case SDL_EVENT_WINDOW_MOVED:
|
||||
case SDL_EVENT_WINDOW_RESIZED:
|
||||
case SDL_EVENT_WINDOW_PIXEL_SIZE_CHANGED:
|
||||
case SDL_EVENT_WINDOW_METAL_VIEW_RESIZED:
|
||||
case SDL_EVENT_WINDOW_MINIMIZED:
|
||||
case SDL_EVENT_WINDOW_MAXIMIZED:
|
||||
case SDL_EVENT_WINDOW_RESTORED:
|
||||
case SDL_EVENT_WINDOW_MOUSE_ENTER:
|
||||
case SDL_EVENT_WINDOW_MOUSE_LEAVE:
|
||||
case SDL_EVENT_WINDOW_HIT_TEST:
|
||||
case SDL_EVENT_WINDOW_ICCPROF_CHANGED:
|
||||
case SDL_EVENT_WINDOW_DISPLAY_CHANGED:
|
||||
case SDL_EVENT_WINDOW_DISPLAY_SCALE_CHANGED:
|
||||
case SDL_EVENT_WINDOW_OCCLUDED:
|
||||
case SDL_EVENT_WINDOW_ENTER_FULLSCREEN:
|
||||
case SDL_EVENT_WINDOW_LEAVE_FULLSCREEN:
|
||||
case SDL_EVENT_WINDOW_DESTROYED:
|
||||
case SDL_EVENT_WINDOW_HDR_STATE_CHANGED:
|
||||
// which, x/width/display_index, y/height => 3 extra
|
||||
count += 3;
|
||||
break;
|
||||
|
||||
case SDL_EVENT_WINDOW_SAFE_AREA_CHANGED:
|
||||
// which, x, y, width, height => 5 extra
|
||||
count += 5;
|
||||
break;
|
||||
|
||||
case SDL_EVENT_JOYSTICK_ADDED:
|
||||
case SDL_EVENT_JOYSTICK_REMOVED:
|
||||
case SDL_EVENT_JOYSTICK_UPDATE_COMPLETE:
|
||||
// which => 1 extra
|
||||
count += 1;
|
||||
break;
|
||||
|
||||
case SDL_EVENT_JOYSTICK_AXIS_MOTION:
|
||||
// which, axis, value => 3 extra
|
||||
count += 3;
|
||||
break;
|
||||
|
||||
case SDL_EVENT_JOYSTICK_BALL_MOTION:
|
||||
// which, ball, rel => 3 extra
|
||||
count += 3;
|
||||
break;
|
||||
|
||||
case SDL_EVENT_JOYSTICK_BUTTON_DOWN:
|
||||
case SDL_EVENT_JOYSTICK_BUTTON_UP:
|
||||
// which, button, down => 3 extra
|
||||
count += 3;
|
||||
break;
|
||||
|
||||
case SDL_EVENT_GAMEPAD_ADDED:
|
||||
case SDL_EVENT_GAMEPAD_REMOVED:
|
||||
case SDL_EVENT_GAMEPAD_REMAPPED:
|
||||
case SDL_EVENT_GAMEPAD_UPDATE_COMPLETE:
|
||||
case SDL_EVENT_GAMEPAD_STEAM_HANDLE_UPDATED:
|
||||
// which => 1 extra
|
||||
count += 1;
|
||||
break;
|
||||
|
||||
case SDL_EVENT_GAMEPAD_AXIS_MOTION:
|
||||
// which, axis, value => 3 extra
|
||||
count += 3;
|
||||
break;
|
||||
|
||||
case SDL_EVENT_GAMEPAD_BUTTON_DOWN:
|
||||
case SDL_EVENT_GAMEPAD_BUTTON_UP:
|
||||
// which, button, down => 3 extra
|
||||
count += 3;
|
||||
break;
|
||||
|
||||
case SDL_EVENT_GAMEPAD_TOUCHPAD_DOWN:
|
||||
case SDL_EVENT_GAMEPAD_TOUCHPAD_MOTION:
|
||||
case SDL_EVENT_GAMEPAD_TOUCHPAD_UP:
|
||||
// which, touchpad, finger, pos, pressure => 5 extra
|
||||
count += 5;
|
||||
break;
|
||||
|
||||
case SDL_EVENT_GAMEPAD_SENSOR_UPDATE:
|
||||
// which, sensor, sensor_timestamp => 3 extra
|
||||
count += 3;
|
||||
break;
|
||||
|
||||
case SDL_EVENT_USER:
|
||||
// cb => 1 extra
|
||||
count += 1;
|
||||
break;
|
||||
}
|
||||
|
||||
return count;
|
||||
}
|
||||
|
||||
static void event2wota_write(WotaBuffer *wb, const SDL_Event *e, int c) {
|
||||
wota_write_record(wb, (unsigned long long)c);
|
||||
wota_write_text(wb, "type");
|
||||
wota_write_text(wb, event_type_to_string(e->type));
|
||||
wota_write_text(wb, "timestamp");
|
||||
wota_write_number(wb, (double)e->common.timestamp);
|
||||
switch(e->type) {
|
||||
case SDL_EVENT_AUDIO_DEVICE_ADDED:
|
||||
case SDL_EVENT_AUDIO_DEVICE_REMOVED:
|
||||
wota_write_text(wb, "which");
|
||||
wota_write_number(wb, (double)e->adevice.which);
|
||||
wota_write_text(wb, "recording");
|
||||
wota_write_sym(wb, e->adevice.recording ? WOTA_TRUE : WOTA_FALSE);
|
||||
break;
|
||||
case SDL_EVENT_DISPLAY_ORIENTATION:
|
||||
wota_write_text(wb, "which");
|
||||
wota_write_number(wb, (double)e->display.displayID);
|
||||
wota_write_text(wb, "orientation");
|
||||
wota_write_number(wb, (double)e->display.data1);
|
||||
wota_write_text(wb, "data2");
|
||||
wota_write_number(wb, (double)e->display.data2);
|
||||
break;
|
||||
case SDL_EVENT_DISPLAY_ADDED:
|
||||
case SDL_EVENT_DISPLAY_REMOVED:
|
||||
case SDL_EVENT_DISPLAY_MOVED:
|
||||
case SDL_EVENT_DISPLAY_DESKTOP_MODE_CHANGED:
|
||||
case SDL_EVENT_DISPLAY_CURRENT_MODE_CHANGED:
|
||||
case SDL_EVENT_DISPLAY_CONTENT_SCALE_CHANGED:
|
||||
wota_write_text(wb, "which");
|
||||
wota_write_number(wb, (double)e->display.displayID);
|
||||
wota_write_text(wb, "data1");
|
||||
wota_write_number(wb, (double)e->display.data1);
|
||||
wota_write_text(wb, "data2");
|
||||
wota_write_number(wb, (double)e->display.data2);
|
||||
break;
|
||||
case SDL_EVENT_MOUSE_MOTION:
|
||||
wota_write_text(wb, "window");
|
||||
wota_write_number(wb, (double)e->motion.windowID);
|
||||
wota_write_text(wb, "which");
|
||||
wota_write_number(wb, (double)e->motion.which);
|
||||
wota_write_text(wb, "state");
|
||||
wota_write_number(wb, (double)e->motion.state);
|
||||
wota_write_text(wb, "pos");
|
||||
wota_write_vec2(wb, (double)e->motion.x, (double)e->motion.y);
|
||||
wota_write_text(wb, "d_pos");
|
||||
wota_write_vec2(wb, (double)e->motion.xrel, (double)e->motion.yrel);
|
||||
break;
|
||||
case SDL_EVENT_MOUSE_WHEEL:
|
||||
wota_write_text(wb, "window");
|
||||
wota_write_number(wb, (double)e->wheel.windowID);
|
||||
wota_write_text(wb, "which");
|
||||
wota_write_number(wb, (double)e->wheel.which);
|
||||
wota_write_text(wb, "scroll");
|
||||
wota_write_vec2(wb, (double)e->wheel.x, (double)e->wheel.y);
|
||||
wota_write_text(wb, "pos");
|
||||
wota_write_vec2(wb, (double)e->wheel.mouse_x, (double)e->wheel.mouse_y);
|
||||
break;
|
||||
case SDL_EVENT_MOUSE_BUTTON_UP:
|
||||
case SDL_EVENT_MOUSE_BUTTON_DOWN:
|
||||
wota_write_text(wb, "window");
|
||||
wota_write_number(wb, (double)e->button.windowID);
|
||||
wota_write_text(wb, "which");
|
||||
wota_write_number(wb, (double)e->button.which);
|
||||
wota_write_text(wb, "down");
|
||||
wota_write_sym(wb, e->button.down ? WOTA_TRUE : WOTA_FALSE);
|
||||
wota_write_text(wb, "button");
|
||||
wota_write_text(wb, mouse_button_to_string(e->button.button));
|
||||
wota_write_text(wb, "clicks");
|
||||
wota_write_number(wb, (double)e->button.clicks);
|
||||
wota_write_text(wb, "pos");
|
||||
wota_write_vec2(wb, (double)e->button.x, (double)e->button.y);
|
||||
break;
|
||||
case SDL_EVENT_SENSOR_UPDATE:
|
||||
wota_write_text(wb, "which");
|
||||
wota_write_number(wb, (double)e->sensor.which);
|
||||
wota_write_text(wb, "sensor_timestamp");
|
||||
wota_write_number(wb, (double)e->sensor.sensor_timestamp);
|
||||
break;
|
||||
case SDL_EVENT_KEY_DOWN:
|
||||
case SDL_EVENT_KEY_UP:
|
||||
wota_write_text(wb, "window");
|
||||
wota_write_number(wb, (double)e->key.windowID);
|
||||
wota_write_text(wb, "which");
|
||||
wota_write_number(wb, (double)e->key.which);
|
||||
wota_write_text(wb, "down");
|
||||
wota_write_sym(wb, e->key.down ? WOTA_TRUE : WOTA_FALSE);
|
||||
wota_write_text(wb, "repeat");
|
||||
wota_write_sym(wb, e->key.repeat ? WOTA_TRUE : WOTA_FALSE);
|
||||
wota_write_text(wb, "key");
|
||||
wota_write_number(wb, (double)e->key.key);
|
||||
wota_write_text(wb, "scancode");
|
||||
wota_write_number(wb, (double)e->key.scancode);
|
||||
wota_write_text(wb, "mod");
|
||||
wota_write_number(wb, (double)e->key.mod);
|
||||
break;
|
||||
case SDL_EVENT_FINGER_MOTION:
|
||||
case SDL_EVENT_FINGER_DOWN:
|
||||
case SDL_EVENT_FINGER_UP:
|
||||
wota_write_text(wb, "touch");
|
||||
wota_write_number(wb, (double)e->tfinger.touchID);
|
||||
wota_write_text(wb, "finger");
|
||||
wota_write_number(wb, (double)e->tfinger.fingerID);
|
||||
wota_write_text(wb, "pos");
|
||||
wota_write_vec2(wb, (double)e->tfinger.x, (double)e->tfinger.y);
|
||||
wota_write_text(wb, "d_pos");
|
||||
wota_write_vec2(wb, (double)e->tfinger.dx, (double)e->tfinger.dy);
|
||||
wota_write_text(wb, "pressure");
|
||||
wota_write_number(wb, (double)e->tfinger.pressure);
|
||||
wota_write_text(wb, "window");
|
||||
wota_write_number(wb, (double)e->tfinger.windowID);
|
||||
break;
|
||||
case SDL_EVENT_DROP_BEGIN:
|
||||
case SDL_EVENT_DROP_FILE:
|
||||
case SDL_EVENT_DROP_TEXT:
|
||||
case SDL_EVENT_DROP_COMPLETE:
|
||||
case SDL_EVENT_DROP_POSITION:
|
||||
wota_write_text(wb, "window");
|
||||
wota_write_number(wb, (double)e->drop.windowID);
|
||||
wota_write_text(wb, "pos");
|
||||
wota_write_vec2(wb, (double)e->drop.x, (double)e->drop.y);
|
||||
wota_write_text(wb, "data");
|
||||
wota_write_text(wb, e->drop.data ? e->drop.data : "");
|
||||
wota_write_text(wb, "source");
|
||||
wota_write_text(wb, e->drop.source ? e->drop.source : "");
|
||||
break;
|
||||
case SDL_EVENT_TEXT_INPUT:
|
||||
wota_write_text(wb, "window");
|
||||
wota_write_number(wb, (double)e->text.windowID);
|
||||
wota_write_text(wb, "text");
|
||||
wota_write_text(wb, e->text.text);
|
||||
wota_write_text(wb, "mod");
|
||||
wota_write_number(wb, 0);
|
||||
break;
|
||||
case SDL_EVENT_CAMERA_DEVICE_APPROVED:
|
||||
case SDL_EVENT_CAMERA_DEVICE_REMOVED:
|
||||
case SDL_EVENT_CAMERA_DEVICE_ADDED:
|
||||
case SDL_EVENT_CAMERA_DEVICE_DENIED:
|
||||
wota_write_text(wb, "which");
|
||||
wota_write_number(wb, (double)e->cdevice.which);
|
||||
break;
|
||||
case SDL_EVENT_CLIPBOARD_UPDATE:
|
||||
wota_write_text(wb, "owner");
|
||||
wota_write_sym(wb, e->clipboard.owner ? WOTA_TRUE : WOTA_FALSE);
|
||||
break;
|
||||
case SDL_EVENT_WINDOW_EXPOSED:
|
||||
case SDL_EVENT_WINDOW_FOCUS_GAINED:
|
||||
case SDL_EVENT_WINDOW_FOCUS_LOST:
|
||||
case SDL_EVENT_WINDOW_CLOSE_REQUESTED:
|
||||
wota_write_text(wb, "which");
|
||||
wota_write_number(wb, (double)e->window.windowID);
|
||||
break;
|
||||
case SDL_EVENT_WINDOW_SHOWN:
|
||||
case SDL_EVENT_WINDOW_HIDDEN:
|
||||
case SDL_EVENT_WINDOW_MINIMIZED:
|
||||
case SDL_EVENT_WINDOW_MAXIMIZED:
|
||||
case SDL_EVENT_WINDOW_RESTORED:
|
||||
case SDL_EVENT_WINDOW_MOUSE_ENTER:
|
||||
case SDL_EVENT_WINDOW_MOUSE_LEAVE:
|
||||
case SDL_EVENT_WINDOW_HIT_TEST:
|
||||
case SDL_EVENT_WINDOW_ICCPROF_CHANGED:
|
||||
case SDL_EVENT_WINDOW_OCCLUDED:
|
||||
case SDL_EVENT_WINDOW_ENTER_FULLSCREEN:
|
||||
case SDL_EVENT_WINDOW_LEAVE_FULLSCREEN:
|
||||
case SDL_EVENT_WINDOW_DESTROYED:
|
||||
case SDL_EVENT_WINDOW_HDR_STATE_CHANGED:
|
||||
wota_write_text(wb, "which");
|
||||
wota_write_number(wb, (double)e->window.windowID);
|
||||
wota_write_text(wb, "data1");
|
||||
wota_write_number(wb, (double)e->window.data1);
|
||||
wota_write_text(wb, "data2");
|
||||
wota_write_number(wb, (double)e->window.data2);
|
||||
break;
|
||||
case SDL_EVENT_WINDOW_SAFE_AREA_CHANGED:
|
||||
{
|
||||
SDL_Window *window = SDL_GetWindowFromID(e->window.windowID);
|
||||
SDL_Rect safe_area = {0, 0, 0, 0};
|
||||
if (window && SDL_GetWindowSafeArea(window, &safe_area)) {
|
||||
wota_write_text(wb, "which");
|
||||
wota_write_number(wb, (double)e->window.windowID);
|
||||
wota_write_text(wb, "x");
|
||||
wota_write_number(wb, (double)safe_area.x);
|
||||
wota_write_text(wb, "y");
|
||||
wota_write_number(wb, (double)safe_area.y);
|
||||
wota_write_text(wb, "width");
|
||||
wota_write_number(wb, (double)safe_area.w);
|
||||
wota_write_text(wb, "height");
|
||||
wota_write_number(wb, (double)safe_area.h);
|
||||
} else {
|
||||
// Fallback to original behavior if SDL_GetWindowSafeArea fails
|
||||
wota_write_text(wb, "which");
|
||||
wota_write_number(wb, (double)e->window.windowID);
|
||||
wota_write_text(wb, "data1");
|
||||
wota_write_number(wb, (double)e->window.data1);
|
||||
wota_write_text(wb, "data2");
|
||||
wota_write_number(wb, (double)e->window.data2);
|
||||
}
|
||||
}
|
||||
break;
|
||||
case SDL_EVENT_WINDOW_MOVED:
|
||||
wota_write_text(wb, "which");
|
||||
wota_write_number(wb, (double)e->window.windowID);
|
||||
wota_write_text(wb, "x");
|
||||
wota_write_number(wb, (double)e->window.data1);
|
||||
wota_write_text(wb, "y");
|
||||
wota_write_number(wb, (double)e->window.data2);
|
||||
break;
|
||||
case SDL_EVENT_WINDOW_RESIZED:
|
||||
case SDL_EVENT_WINDOW_PIXEL_SIZE_CHANGED:
|
||||
case SDL_EVENT_WINDOW_METAL_VIEW_RESIZED:
|
||||
wota_write_text(wb, "which");
|
||||
wota_write_number(wb, (double)e->window.windowID);
|
||||
wota_write_text(wb, "width");
|
||||
wota_write_number(wb, (double)e->window.data1);
|
||||
wota_write_text(wb, "height");
|
||||
wota_write_number(wb, (double)e->window.data2);
|
||||
break;
|
||||
case SDL_EVENT_WINDOW_DISPLAY_CHANGED:
|
||||
case SDL_EVENT_WINDOW_DISPLAY_SCALE_CHANGED:
|
||||
wota_write_text(wb, "which");
|
||||
wota_write_number(wb, (double)e->window.windowID);
|
||||
wota_write_text(wb, "display_index");
|
||||
wota_write_number(wb, (double)e->window.data1);
|
||||
wota_write_text(wb, "data2");
|
||||
wota_write_number(wb, (double)e->window.data2);
|
||||
break;
|
||||
case SDL_EVENT_JOYSTICK_ADDED:
|
||||
case SDL_EVENT_JOYSTICK_REMOVED:
|
||||
case SDL_EVENT_JOYSTICK_UPDATE_COMPLETE:
|
||||
wota_write_text(wb, "which");
|
||||
wota_write_number(wb, (double)e->jdevice.which);
|
||||
break;
|
||||
case SDL_EVENT_JOYSTICK_AXIS_MOTION:
|
||||
wota_write_text(wb, "which");
|
||||
wota_write_number(wb, (double)e->jaxis.which);
|
||||
wota_write_text(wb, "axis");
|
||||
wota_write_number(wb, (double)e->jaxis.axis);
|
||||
wota_write_text(wb, "value");
|
||||
wota_write_number(wb, (double)e->jaxis.value);
|
||||
break;
|
||||
case SDL_EVENT_JOYSTICK_BALL_MOTION:
|
||||
wota_write_text(wb, "which");
|
||||
wota_write_number(wb, (double)e->jball.which);
|
||||
wota_write_text(wb, "ball");
|
||||
wota_write_number(wb, (double)e->jball.ball);
|
||||
wota_write_text(wb, "rel");
|
||||
wota_write_vec2(wb, (double)e->jball.xrel, (double)e->jball.yrel);
|
||||
break;
|
||||
case SDL_EVENT_JOYSTICK_BUTTON_DOWN:
|
||||
case SDL_EVENT_JOYSTICK_BUTTON_UP:
|
||||
wota_write_text(wb, "which");
|
||||
wota_write_number(wb, (double)e->jbutton.which);
|
||||
wota_write_text(wb, "button");
|
||||
wota_write_number(wb, (double)e->jbutton.button);
|
||||
wota_write_text(wb, "down");
|
||||
wota_write_sym(wb, e->jbutton.down ? WOTA_TRUE : WOTA_FALSE);
|
||||
break;
|
||||
case SDL_EVENT_GAMEPAD_ADDED:
|
||||
case SDL_EVENT_GAMEPAD_REMOVED:
|
||||
case SDL_EVENT_GAMEPAD_REMAPPED:
|
||||
case SDL_EVENT_GAMEPAD_UPDATE_COMPLETE:
|
||||
case SDL_EVENT_GAMEPAD_STEAM_HANDLE_UPDATED:
|
||||
wota_write_text(wb, "which");
|
||||
wota_write_number(wb, (double)e->gdevice.which);
|
||||
break;
|
||||
case SDL_EVENT_GAMEPAD_AXIS_MOTION:
|
||||
wota_write_text(wb, "which");
|
||||
wota_write_number(wb, (double)e->gaxis.which);
|
||||
wota_write_text(wb, "axis");
|
||||
wota_write_text(wb, SDL_GetGamepadStringForAxis(e->gaxis.axis));
|
||||
wota_write_text(wb, "value");
|
||||
// Normalize axis values: both triggers (0..32767) and sticks (-32768..32767)
// are divided by 32767, giving roughly 0..1 and -1..1 respectively
double normalized_value = (double)e->gaxis.value / 32767.0;
wota_write_number(wb, normalized_value);
|
||||
break;
|
||||
case SDL_EVENT_GAMEPAD_BUTTON_DOWN:
|
||||
case SDL_EVENT_GAMEPAD_BUTTON_UP:
|
||||
wota_write_text(wb, "which");
|
||||
wota_write_number(wb, (double)e->gbutton.which);
|
||||
wota_write_text(wb, "button");
|
||||
wota_write_text(wb, SDL_GetGamepadStringForButton(e->gbutton.button));
|
||||
wota_write_text(wb, "down");
|
||||
wota_write_sym(wb, e->gbutton.down ? WOTA_TRUE : WOTA_FALSE);
|
||||
break;
|
||||
case SDL_EVENT_GAMEPAD_TOUCHPAD_DOWN:
|
||||
case SDL_EVENT_GAMEPAD_TOUCHPAD_MOTION:
|
||||
case SDL_EVENT_GAMEPAD_TOUCHPAD_UP:
|
||||
wota_write_text(wb, "which");
|
||||
wota_write_number(wb, (double)e->gtouchpad.which);
|
||||
wota_write_text(wb, "touchpad");
|
||||
wota_write_number(wb, (double)e->gtouchpad.touchpad);
|
||||
wota_write_text(wb, "finger");
|
||||
wota_write_number(wb, (double)e->gtouchpad.finger);
|
||||
wota_write_text(wb, "pos");
|
||||
wota_write_vec2(wb, (double)e->gtouchpad.x, (double)e->gtouchpad.y);
|
||||
wota_write_text(wb, "pressure");
|
||||
wota_write_number(wb, (double)e->gtouchpad.pressure);
|
||||
break;
|
||||
case SDL_EVENT_GAMEPAD_SENSOR_UPDATE:
|
||||
wota_write_text(wb, "which");
|
||||
wota_write_number(wb, (double)e->gsensor.which);
|
||||
wota_write_text(wb, "sensor");
|
||||
wota_write_number(wb, (double)e->gsensor.sensor);
|
||||
wota_write_text(wb, "sensor_timestamp");
|
||||
wota_write_number(wb, (double)e->gsensor.sensor_timestamp);
|
||||
break;
|
||||
case SDL_EVENT_USER:
|
||||
wota_write_text(wb, "cb");
|
||||
wota_write_number(wb, (double)(uintptr_t)e->user.data1);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
static WotaBuffer event2wota(const SDL_Event *event) {
|
||||
WotaBuffer wb;
|
||||
wota_buffer_init(&wb, 8);
|
||||
int n = event2wota_count_props(event);
|
||||
event2wota_write(&wb, event, n);
|
||||
return wb;
|
||||
}
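
As a worked example of the encoding above (a sketch, not additional behavior): for a plain SDL_EVENT_QUIT, event2wota_count_props returns 2, so event2wota produces the same bytes as writing the two common fields by hand for an event pointer e:

// Equivalent by-hand serialization of an SDL_EVENT_QUIT held in `e` (sketch).
WotaBuffer wb;
wota_buffer_init(&wb, 8);
wota_write_record(&wb, 2);                 // property count: type + timestamp
wota_write_text(&wb, "type");
wota_write_text(&wb, "quit");              // event_type_to_string(SDL_EVENT_QUIT)
wota_write_text(&wb, "timestamp");
wota_write_number(&wb, (double)e->common.timestamp);
// wota_buffer_free(&wb) once the buffer has been consumed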
|
||||
|
||||
// Get all events directly from SDL event queue
|
||||
JSC_CCALL(input_get_events,
|
||||
JSValue events_array = JS_NewArray(js);
|
||||
SDL_Event event;
|
||||
int event_count = 0;
|
||||
|
||||
while (SDL_PollEvent(&event)) {
|
||||
// gui_input(&event);
|
||||
|
||||
WotaBuffer wb = event2wota(&event);
|
||||
JSValue event_obj = wota2value(js, wb.data);
|
||||
JS_SetPropertyUint32(js, events_array, event_count, event_obj);
|
||||
wota_buffer_free(&wb);
|
||||
event_count++;
|
||||
}
|
||||
|
||||
return events_array;
|
||||
)
|
||||
|
||||
JSC_CCALL(input_gamepad_id_to_type,
|
||||
int id = js2number(js, argv[0]);
|
||||
return JS_NewString(js, SDL_GetGamepadStringForType(SDL_GetGamepadTypeForID(id)));
|
||||
)
|
||||
|
||||
static const JSCFunctionListEntry js_input_funcs[] = {
|
||||
MIST_FUNC_DEF(input, mouse_show, 1),
|
||||
MIST_FUNC_DEF(input, mouse_lock, 1),
|
||||
MIST_FUNC_DEF(input, keyname, 1),
|
||||
MIST_FUNC_DEF(input, keymod, 0),
|
||||
MIST_FUNC_DEF(input, mousestate, 0),
|
||||
MIST_FUNC_DEF(input, get_events, 0),
|
||||
MIST_FUNC_DEF(input, gamepad_id_to_type, 1),
|
||||
};
|
||||
|
||||
CELL_USE_FUNCS(js_input_funcs)
@@ -1,521 +0,0 @@
#include "cell.h"
|
||||
#include "prosperon.h"
|
||||
#include <SDL3/SDL.h>
|
||||
|
||||
#include <math.h>
|
||||
#include <string.h>
|
||||
#include <stdlib.h>
|
||||
#include "HandmadeMath.h"
|
||||
#include "sdl.h"
|
||||
#include <assert.h>
|
||||
|
||||
void SDL_Renderer_free() {
|
||||
|
||||
}
|
||||
|
||||
void SDL_Texture_free() {
|
||||
|
||||
}
|
||||
|
||||
typedef rect SDL_Rect;
|
||||
|
||||
QJSCLASS(SDL_Renderer,)
|
||||
QJSCLASS(SDL_Texture,)
|
||||
|
||||
rect transform_rect(rect in, HMM_Mat3 *t)
|
||||
{
|
||||
HMM_Vec3 bottom_left = (HMM_Vec3){in.x,in.y,1.0};
|
||||
HMM_Vec3 transformed_bl = HMM_MulM3V3(*t, bottom_left);
|
||||
in.x = transformed_bl.x;
|
||||
in.y = transformed_bl.y;
|
||||
in.y = in.y - in.h; // should be done for any platform that draws rectangles from top left
|
||||
return in;
|
||||
}
|
||||
|
||||
HMM_Vec2 transform_point(SDL_Renderer *ren, HMM_Vec2 in, HMM_Mat3 *t)
|
||||
{
|
||||
rect logical;
|
||||
SDL_GetRenderLogicalPresentationRect(ren, &logical);
|
||||
in.y *= -1;
|
||||
in.y += logical.h;
|
||||
in.x -= t->Columns[2].x;
|
||||
in.y -= t->Columns[2].y;
|
||||
return in;
|
||||
}
|
||||
|
||||
JSC_CCALL(SDL_Renderer_clear,
|
||||
SDL_Renderer *renderer = js2SDL_Renderer(js,self);
|
||||
SDL_RenderClear(renderer);
|
||||
)
|
||||
|
||||
JSC_CCALL(SDL_Renderer_present,
|
||||
SDL_Renderer *ren = js2SDL_Renderer(js,self);
|
||||
SDL_RenderPresent(ren);
|
||||
)
|
||||
|
||||
JSC_CCALL(SDL_Renderer_draw_color,
|
||||
SDL_Renderer *renderer = js2SDL_Renderer(js,self);
|
||||
colorf color = js2color(js,argv[0]);
|
||||
SDL_SetRenderDrawColorFloat(renderer, color.r,color.g,color.b,color.a);
|
||||
)
|
||||
|
||||
JSC_CCALL(SDL_Renderer_rect,
|
||||
SDL_Renderer *r = js2SDL_Renderer(js,self);
|
||||
if (!JS_IsNull(argv[1])) {
|
||||
colorf color = js2color(js,argv[1]);
|
||||
SDL_SetRenderDrawColorFloat(r, color.r, color.g, color.b, color.a);
|
||||
}
|
||||
|
||||
if (JS_IsArray(js,argv[0])) {
|
||||
int len = JS_ArrayLength(js,argv[0]);
|
||||
rect rects[len];
|
||||
for (int i = 0; i < len; i++) {
|
||||
JSValue val = JS_GetPropertyUint32(js,argv[0],i);
|
||||
rects[i] = transform_rect(js2rect(js,val), &cam_mat);
|
||||
JS_FreeValue(js,val);
|
||||
}
|
||||
SDL_RenderRects(r,rects,len);
|
||||
return JS_NULL;
|
||||
}
|
||||
|
||||
rect rect = js2rect(js,argv[0]);
|
||||
|
||||
rect = transform_rect(rect, &cam_mat);
|
||||
|
||||
SDL_RenderRect(r, &rect);
|
||||
)
|
||||
|
||||
JSC_CCALL(renderer_load_texture,
|
||||
SDL_Renderer *r = js2SDL_Renderer(js,self);
|
||||
SDL_Surface *surf = js2SDL_Surface(js,argv[0]);
|
||||
if (!surf) return JS_ThrowReferenceError(js, "Surface was not a surface.");
|
||||
SDL_Texture *tex = SDL_CreateTextureFromSurface(r,surf);
|
||||
if (!tex) return JS_ThrowReferenceError(js, "Could not create texture from surface: %s", SDL_GetError());
|
||||
ret = SDL_Texture2js(js,tex);
|
||||
JS_SetPropertyStr(js,ret,"width", number2js(js,tex->w));
|
||||
JS_SetPropertyStr(js,ret,"height", number2js(js,tex->h));
|
||||
)
|
||||
|
||||
JSC_CCALL(SDL_Renderer_fillrect,
|
||||
SDL_Renderer *r = js2SDL_Renderer(js,self);
|
||||
if (!JS_IsNull(argv[1])) {
|
||||
colorf color = js2color(js,argv[1]);
|
||||
SDL_SetRenderDrawColorFloat(r, color.r, color.g, color.b, color.a);
|
||||
}
|
||||
|
||||
if (JS_IsArray(js,argv[0])) {
|
||||
int len = JS_ArrayLength(js,argv[0]);
|
||||
rect rects[len];
|
||||
for (int i = 0; i < len; i++) {
|
||||
JSValue val = JS_GetPropertyUint32(js,argv[0],i);
|
||||
rects[i] = js2rect(js,val);
|
||||
JS_FreeValue(js,val);
|
||||
}
|
||||
if (!SDL_RenderFillRects(r,rects,len))
return JS_ThrowReferenceError(js, "Could not render rectangle: %s", SDL_GetError());
return JS_NULL; // array case handled; skip the single-rect path below
}
|
||||
rect rect = transform_rect(js2rect(js,argv[0]),&cam_mat);
|
||||
|
||||
if (!SDL_RenderFillRect(r, &rect))
|
||||
return JS_ThrowReferenceError(js, "Could not render rectangle: %s", SDL_GetError());
|
||||
)
|
||||
|
||||
JSC_CCALL(renderer_texture,
|
||||
SDL_Renderer *renderer = js2SDL_Renderer(js,self);
|
||||
SDL_Texture *tex = js2SDL_Texture(js,argv[0]);
|
||||
rect dst = transform_rect(js2rect(js,argv[1]), &cam_mat);
|
||||
|
||||
if (!JS_IsNull(argv[3])) {
|
||||
colorf color = js2color(js,argv[3]);
|
||||
SDL_SetTextureColorModFloat(tex, color.r, color.g, color.b);
|
||||
SDL_SetTextureAlphaModFloat(tex,color.a);
|
||||
}
|
||||
if (JS_IsNull(argv[2]))
|
||||
SDL_RenderTexture(renderer,tex,NULL,&dst);
|
||||
else {
|
||||
|
||||
rect src = js2rect(js,argv[2]);
|
||||
|
||||
SDL_RenderTextureRotated(renderer, tex, &src, &dst, 0, NULL, SDL_FLIP_NONE);
|
||||
}
|
||||
)
|
||||
|
||||
JSC_CCALL(renderer_tile,
|
||||
SDL_Renderer *renderer = js2SDL_Renderer(js,self);
|
||||
if (!renderer) return JS_ThrowTypeError(js,"self was not a renderer");
|
||||
SDL_Texture *tex = js2SDL_Texture(js,argv[0]);
|
||||
if (!tex) return JS_ThrowTypeError(js,"first argument was not a texture");
|
||||
rect dst = js2rect(js,argv[1]);
|
||||
if (!dst.w) dst.w = tex->w;
|
||||
if (!dst.h) dst.h = tex->h;
|
||||
float scale = js2number(js,argv[3]);
|
||||
if (!scale) scale = 1;
|
||||
if (JS_IsNull(argv[2]))
|
||||
SDL_RenderTextureTiled(renderer,tex,NULL,scale, &dst);
|
||||
else {
|
||||
rect src = js2rect(js,argv[2]);
|
||||
SDL_RenderTextureTiled(renderer,tex,&src,scale, &dst);
|
||||
}
|
||||
)
|
||||
|
||||
JSC_CCALL(renderer_slice9,
|
||||
SDL_Renderer *renderer = js2SDL_Renderer(js,self);
|
||||
SDL_Texture *tex = js2SDL_Texture(js,argv[0]);
|
||||
lrtb bounds = js2lrtb(js,argv[2]);
|
||||
rect src, dst;
|
||||
src = transform_rect(js2rect(js,argv[3]),&cam_mat);
|
||||
dst = transform_rect(js2rect(js,argv[1]), &cam_mat);
|
||||
|
||||
SDL_RenderTexture9Grid(renderer, tex,
|
||||
JS_IsNull(argv[3]) ? NULL : &src,
|
||||
bounds.l, bounds.r, bounds.t, bounds.b, 0.0,
|
||||
JS_IsNull(argv[1]) ? NULL : &dst);
|
||||
)
|
||||
|
||||
JSC_CCALL(renderer_get_image,
|
||||
SDL_Renderer *r = js2SDL_Renderer(js,self);
|
||||
SDL_Surface *surf = NULL;
|
||||
if (!JS_IsNull(argv[0])) {
|
||||
rect rect = js2rect(js,argv[0]);
|
||||
surf = SDL_RenderReadPixels(r,&rect);
|
||||
} else
|
||||
surf = SDL_RenderReadPixels(r,NULL);
|
||||
if (!surf) return JS_ThrowReferenceError(js, "could not make surface from renderer");
|
||||
return SDL_Surface2js(js,surf);
|
||||
)
|
||||
|
||||
JSC_SCALL(renderer_fasttext,
|
||||
SDL_Renderer *r = js2SDL_Renderer(js,self);
|
||||
if (!JS_IsNull(argv[2])) {
|
||||
colorf color = js2color(js,argv[2]);
|
||||
SDL_SetRenderDrawColorFloat(r, color.r, color.g, color.b, color.a);
|
||||
}
|
||||
HMM_Vec2 pos = js2vec2(js,argv[1]);
|
||||
pos.y += 8;
|
||||
HMM_Vec2 tpos = HMM_MulM3V3(cam_mat, (HMM_Vec3){pos.x,pos.y,1}).xy;
|
||||
SDL_RenderDebugText(r, tpos.x, tpos.y, str);
|
||||
)
|
||||
|
||||
JSC_CCALL(renderer_line,
|
||||
SDL_Renderer *r = js2SDL_Renderer(js,self);
|
||||
if (!JS_IsNull(argv[1])) {
|
||||
colorf color = js2color(js,argv[1]);
|
||||
SDL_SetRenderDrawColorFloat(r, color.r, color.g, color.b, color.a);
|
||||
}
|
||||
|
||||
if (JS_IsArray(js,argv[0])) {
|
||||
int len = JS_ArrayLength(js,argv[0]);
|
||||
HMM_Vec2 points[len];
|
||||
assert(sizeof(HMM_Vec2) == sizeof(SDL_FPoint));
|
||||
for (int i = 0; i < len; i++) {
|
||||
JSValue val = JS_GetPropertyUint32(js,argv[0],i);
|
||||
points[i] = js2vec2(js,val);
|
||||
JS_FreeValue(js,val);
|
||||
}
|
||||
SDL_RenderLines(r,points,len);
|
||||
}
|
||||
)
|
||||
|
||||
JSC_CCALL(renderer_point,
|
||||
SDL_Renderer *r = js2SDL_Renderer(js,self);
|
||||
if (!JS_IsNull(argv[1])) {
|
||||
colorf color = js2color(js,argv[1]);
|
||||
SDL_SetRenderDrawColorFloat(r, color.r, color.g, color.b, color.a);
|
||||
}
|
||||
|
||||
if (JS_IsArray(js,argv[0])) {
|
||||
int len = JS_ArrayLength(js,argv[0]);
|
||||
HMM_Vec2 points[len];
|
||||
assert(sizeof(HMM_Vec2) ==sizeof(SDL_FPoint));
|
||||
for (int i = 0; i < len; i++) {
|
||||
JSValue val = JS_GetPropertyUint32(js,argv[0],i);
|
||||
points[i] = js2vec2(js,val);
|
||||
JS_FreeValue(js,val);
|
||||
}
|
||||
SDL_RenderPoints(r, points, len);
|
||||
return JS_NULL;
|
||||
}
|
||||
|
||||
HMM_Vec2 point = transform_point(r, js2vec2(js,argv[0]), &cam_mat);
|
||||
SDL_RenderPoint(r,point.x,point.y);
|
||||
)
|
||||
|
||||
// Function to translate a list of 2D points
|
||||
void Translate2DPoints(HMM_Vec2 *points, int count, HMM_Vec3 position, HMM_Quat rotation, HMM_Vec3 scale) {
|
||||
// Precompute the 2D rotation matrix from the quaternion
|
||||
float xx = rotation.x * rotation.x;
|
||||
float yy = rotation.y * rotation.y;
|
||||
float zz = rotation.z * rotation.z;
|
||||
float xy = rotation.x * rotation.y;
|
||||
float zw = rotation.z * rotation.w;
|
||||
|
||||
// Extract 2D affine rotation and scaling
|
||||
float m00 = (1.0f - 2.0f * (yy + zz)) * scale.x; // Row 1, Column 1
|
||||
float m01 = (2.0f * (xy + zw)) * scale.y; // Row 1, Column 2
|
||||
float m10 = (2.0f * (xy - zw)) * scale.x; // Row 2, Column 1
|
||||
float m11 = (1.0f - 2.0f * (xx + zz)) * scale.y; // Row 2, Column 2
|
||||
|
||||
// Translation components (ignore the z position)
|
||||
float tx = position.x;
|
||||
float ty = position.y;
|
||||
|
||||
// Transform each point
|
||||
for (int i = 0; i < count; ++i) {
|
||||
HMM_Vec2 p = points[i];
|
||||
points[i].x = m00 * p.x + m01 * p.y + tx;
|
||||
points[i].y = m10 * p.x + m11 * p.y + ty;
|
||||
}
|
||||
}
|
||||
|
||||
// Should take a single struct with pos, color, uv, and indices arrays
|
||||
JSC_CCALL(renderer_geometry,
|
||||
SDL_Renderer *r = js2SDL_Renderer(js,self);
|
||||
JSValue pos = JS_GetPropertyStr(js,argv[1], "pos");
|
||||
JSValue color = JS_GetPropertyStr(js,argv[1], "color");
|
||||
JSValue uv = JS_GetPropertyStr(js,argv[1], "uv");
|
||||
JSValue indices = JS_GetPropertyStr(js,argv[1], "indices");
|
||||
JSValue js_vertices = JS_GetPropertyStr(js,argv[1], "vertices");
|
||||
JSValue js_count = JS_GetPropertyStr(js,argv[1], "count");
|
||||
int vertices, count;
|
||||
JS_ToInt32(js, &vertices, js_vertices);
|
||||
JS_ToInt32(js, &count, js_count);
|
||||
JS_FreeValue(js, js_vertices);
|
||||
JS_FreeValue(js, js_count);
|
||||
|
||||
size_t pos_stride, indices_stride, uv_stride, color_stride;
|
||||
void *posdata = get_gpu_buffer(js,pos, &pos_stride, NULL);
|
||||
void *idxdata = get_gpu_buffer(js,indices, &indices_stride, NULL);
|
||||
void *uvdata = get_gpu_buffer(js,uv, &uv_stride, NULL);
|
||||
void *colordata = get_gpu_buffer(js,color,&color_stride, NULL);
|
||||
|
||||
SDL_Texture *tex = js2SDL_Texture(js,argv[0]);
|
||||
|
||||
HMM_Vec2 *trans_pos = malloc(vertices*sizeof(HMM_Vec2));
|
||||
memcpy(trans_pos,posdata, sizeof(HMM_Vec2)*vertices);
|
||||
|
||||
for (int i = 0; i < vertices; i++)
|
||||
trans_pos[i] = HMM_MulM3V3(cam_mat, (HMM_Vec3){trans_pos[i].x, trans_pos[i].y, 1}).xy;
|
||||
|
||||
if (!SDL_RenderGeometryRaw(r, tex, trans_pos, pos_stride,colordata,color_stride,uvdata, uv_stride, vertices, idxdata, count, indices_stride))
|
||||
ret = JS_ThrowReferenceError(js, "Error rendering geometry: %s",SDL_GetError());
|
||||
|
||||
free(trans_pos);
|
||||
|
||||
JS_FreeValue(js,pos);
|
||||
JS_FreeValue(js,color);
|
||||
JS_FreeValue(js,uv);
|
||||
JS_FreeValue(js,indices);
|
||||
)
|
||||
|
||||
JSC_CCALL(renderer_logical_size,
|
||||
SDL_Renderer *r = js2SDL_Renderer(js,self);
|
||||
HMM_Vec2 v = js2vec2(js,argv[0]);
|
||||
SDL_SetRenderLogicalPresentation(r,v.x,v.y,SDL_LOGICAL_PRESENTATION_INTEGER_SCALE);
|
||||
)
|
||||
|
||||
JSC_CCALL(renderer_viewport,
|
||||
SDL_Renderer *r = js2SDL_Renderer(js,self);
|
||||
if (JS_IsNull(argv[0]))
|
||||
SDL_SetRenderViewport(r,NULL);
|
||||
else {
|
||||
rect view = js2rect(js,argv[0]);
|
||||
SDL_SetRenderViewport(r,&view);
|
||||
}
|
||||
)
|
||||
|
||||
JSC_CCALL(renderer_get_viewport,
|
||||
SDL_Renderer *r = js2SDL_Renderer(js,self);
|
||||
SDL_Rect vp;
|
||||
SDL_GetRenderViewport(r, &vp);
|
||||
rect re;
|
||||
re.x = vp.x;
|
||||
re.y = vp.y;
|
||||
re.h = vp.h;
|
||||
re.w = vp.w;
|
||||
return rect2js(js,re);
|
||||
)
|
||||
|
||||
JSC_CCALL(renderer_clip,
|
||||
SDL_Renderer *r = js2SDL_Renderer(js,self);
|
||||
if (JS_IsNull(argv[0]))
|
||||
SDL_SetRenderClipRect(r,NULL);
|
||||
else {
|
||||
rect view = js2rect(js,argv[0]);
|
||||
SDL_SetRenderClipRect(r,&view);
|
||||
}
|
||||
)
|
||||
|
||||
JSC_CCALL(renderer_scale,
|
||||
SDL_Renderer *r = js2SDL_Renderer(js,self);
|
||||
HMM_Vec2 v = js2vec2(js,argv[0]);
|
||||
SDL_SetRenderScale(r, v.x, v.y);
|
||||
)
|
||||
|
||||
JSC_CCALL(renderer_vsync,
|
||||
SDL_Renderer *r = js2SDL_Renderer(js,self);
|
||||
SDL_SetRenderVSync(r,js2number(js,argv[0]));
|
||||
)
|
||||
|
||||
// This returns the coordinates inside the renderer's coordinate space for a given window position
|
||||
JSC_CCALL(renderer_coords,
|
||||
SDL_Renderer *r = js2SDL_Renderer(js,self);
|
||||
HMM_Vec2 pos, coord;
|
||||
pos = js2vec2(js,argv[0]);
|
||||
SDL_RenderCoordinatesFromWindow(r,pos.x,pos.y, &coord.x, &coord.y);
|
||||
return vec22js(js,coord);
|
||||
)
|
||||
|
||||
JSC_CCALL(renderer_camera,
|
||||
int centered = JS_ToBool(js,argv[1]);
|
||||
SDL_Renderer *ren = js2SDL_Renderer(js,self);
|
||||
SDL_Rect vp;
|
||||
SDL_GetRenderViewport(ren, &vp);
|
||||
HMM_Mat3 proj;
|
||||
proj.Columns[0] = (HMM_Vec3){1,0,0};
|
||||
proj.Columns[1] = (HMM_Vec3){0,-1,0};
|
||||
if (centered)
|
||||
proj.Columns[2] = (HMM_Vec3){vp.w/2.0,vp.h/2.0,1};
|
||||
else
|
||||
proj.Columns[2] = (HMM_Vec3){0,vp.h,1};
|
||||
|
||||
transform *tra = js2transform(js,argv[0]);
|
||||
HMM_Mat3 view;
|
||||
view.Columns[0] = (HMM_Vec3){1,0,0};
|
||||
view.Columns[1] = (HMM_Vec3){0,1,0};
|
||||
view.Columns[2] = (HMM_Vec3){-tra->pos.x, -tra->pos.y,1};
|
||||
cam_mat = HMM_MulM3(proj,view);
|
||||
)
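
To make the composition concrete, here is a short sketch of what cam_mat does to a world-space point in the non-centered case; x, y, cx, cy (the camera transform position) and vp (the current viewport) are placeholders, not names from this file.

// With proj columns (1,0,0), (0,-1,0), (0,vp.h,1) and view translating by (-cx,-cy),
// cam_mat = proj * view maps a world point (x, y) to
//   screen.x = x - cx
//   screen.y = vp.h - (y - cy)
// i.e. world +y points up while SDL's origin is the top-left corner.
HMM_Vec3 world = (HMM_Vec3){x, y, 1.0};
HMM_Vec2 screen = HMM_MulM3V3(cam_mat, world).xy;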
|
||||
|
||||
JSC_CCALL(renderer_screen2world,
|
||||
HMM_Mat3 inv = HMM_InvGeneralM3(cam_mat);
|
||||
HMM_Vec3 pos = js2vec3(js,argv[0]);
|
||||
return vec22js(js, HMM_MulM3V3(inv, pos).xy);
|
||||
)
|
||||
|
||||
JSC_CCALL(renderer_target,
|
||||
SDL_Renderer *r = js2SDL_Renderer(js,self);
|
||||
if (JS_IsNull(argv[0]))
|
||||
SDL_SetRenderTarget(r, NULL);
|
||||
else {
|
||||
SDL_Texture *tex = js2SDL_Texture(js,argv[0]);
|
||||
SDL_SetRenderTarget(r,tex);
|
||||
}
|
||||
)
|
||||
|
||||
// Given an array of sprites, make the necessary geometry
|
||||
// A sprite is expected to have:
|
||||
// transform: a transform encoding position and rotation. its scale is in pixels - so a scale of 1 means the image will draw only on a single pixel.
|
||||
// image: a standard prosperon image of a surface, rect, and texture
|
||||
// color: the color this sprite should be hued by
|
||||
JSC_CCALL(renderer_make_sprite_mesh,
|
||||
JSValue sprites = argv[0];
|
||||
size_t quads = JS_ArrayLength(js,argv[0]);
|
||||
size_t verts = quads*4;
|
||||
size_t count = quads*6;
|
||||
|
||||
HMM_Vec2 *posdata = malloc(sizeof(*posdata)*verts);
|
||||
HMM_Vec2 *uvdata = malloc(sizeof(*uvdata)*verts);
|
||||
HMM_Vec4 *colordata = malloc(sizeof(*colordata)*verts);
|
||||
|
||||
for (int i = 0; i < quads; i++) {
|
||||
JSValue sub = JS_GetPropertyUint32(js,sprites,i);
|
||||
JSValue jstransform = JS_GetPropertyStr(js,sub,"transform");
|
||||
|
||||
JSValue jssrc = JS_GetPropertyStr(js,sub,"src");
|
||||
JSValue jscolor = JS_GetPropertyStr(js,sub,"color");
|
||||
HMM_Vec4 color;
|
||||
|
||||
rect src;
|
||||
if (JS_IsNull(jssrc))
|
||||
src = (rect){.x = 0, .y = 0, .w = 1, .h = 1};
|
||||
else
|
||||
src = js2rect(js,jssrc);
|
||||
|
||||
if (JS_IsNull(jscolor))
|
||||
color = (HMM_Vec4){1,1,1,1};
|
||||
else
|
||||
color = js2vec4(js,jscolor);
|
||||
|
||||
// Calculate the base index for the current quad
|
||||
size_t base = i * 4;
|
||||
|
||||
// Define the UV coordinates based on the source rectangle
|
||||
uvdata[base + 0] = (HMM_Vec2){ src.x, src.y + src.h };
|
||||
uvdata[base + 1] = (HMM_Vec2){ src.x + src.w, src.y + src.h };
|
||||
uvdata[base + 2] = (HMM_Vec2){ src.x, src.y };
|
||||
uvdata[base + 3] = (HMM_Vec2){ src.x + src.w, src.y };
|
||||
|
||||
colordata[base] = color;
|
||||
colordata[base+1] = color;
|
||||
colordata[base+2] = color;
|
||||
colordata[base+3] = color;
|
||||
|
||||
JS_FreeValue(js,jstransform);
|
||||
JS_FreeValue(js,sub);
|
||||
JS_FreeValue(js,jscolor);
|
||||
JS_FreeValue(js,jssrc);
|
||||
}
|
||||
|
||||
ret = JS_NewObject(js);
|
||||
JS_SetPropertyStr(js, ret, "pos", make_gpu_buffer(js, posdata, sizeof(*posdata) * verts, JS_TYPED_ARRAY_FLOAT32, 2, 0,0));
|
||||
JS_SetPropertyStr(js, ret, "uv", make_gpu_buffer(js, uvdata, sizeof(*uvdata) * verts, JS_TYPED_ARRAY_FLOAT32, 2, 0,0));
|
||||
JS_SetPropertyStr(js, ret, "color", make_gpu_buffer(js, colordata, sizeof(*colordata) * verts, JS_TYPED_ARRAY_FLOAT32, 4, 0,0));
|
||||
JS_SetPropertyStr(js, ret, "indices", make_quad_indices_buffer(js, quads));
|
||||
JS_SetPropertyStr(js, ret, "vertices", number2js(js, verts));
|
||||
JS_SetPropertyStr(js, ret, "count", number2js(js, count));
|
||||
)
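
The index data comes from make_quad_indices_buffer, which is defined elsewhere; assuming it emits the conventional two-triangles-per-quad pattern over the four vertices written at base = i * 4, the layout would look like the following sketch (hypothetical, shown only to document the expected vertex ordering).

// Hypothetical per-quad index layout (not taken from make_quad_indices_buffer):
for (size_t i = 0; i < quads; i++) {
  unsigned base = (unsigned)(i * 4);
  unsigned idx[6] = { base + 0, base + 1, base + 2,   // first triangle
                      base + 2, base + 1, base + 3 }; // second triangle
  // ...written into the index buffer at offset i * 6
}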
|
||||
|
||||
static const JSCFunctionListEntry js_renderer_funcs[] = {
|
||||
JS_CFUNC_DEF("clear", 0, js_renderer_clear),
|
||||
JS_CFUNC_DEF("present", 0, js_renderer_present),
|
||||
JS_CFUNC_DEF("draw_color", 1, js_renderer_draw_color),
|
||||
JS_CFUNC_DEF("rect", 2, js_renderer_rect),
|
||||
JS_CFUNC_DEF("fillrect", 2, js_renderer_fillrect),
|
||||
JS_CFUNC_DEF("line", 2, js_renderer_line),
|
||||
JS_CFUNC_DEF("point", 2, js_renderer_point),
|
||||
JS_CFUNC_DEF("load_texture", 1, js_renderer_load_texture),
|
||||
JS_CFUNC_DEF("texture", 4, js_renderer_texture),
|
||||
JS_CFUNC_DEF("slice9", 4, js_renderer_slice9),
|
||||
JS_CFUNC_DEF("tile", 4, js_renderer_tile),
|
||||
JS_CFUNC_DEF("get_image", 1, js_renderer_get_image),
|
||||
JS_CFUNC_DEF("fasttext", 2, js_renderer_fasttext),
|
||||
JS_CFUNC_DEF("geometry", 2, js_renderer_geometry),
|
||||
JS_CFUNC_DEF("scale", 1, js_renderer_scale),
|
||||
JS_CFUNC_DEF("logical_size", 1, js_renderer_logical_size),
|
||||
JS_CFUNC_DEF("viewport", 1, js_renderer_viewport),
|
||||
JS_CFUNC_DEF("clip", 1, js_renderer_clip),
|
||||
JS_CFUNC_DEF("vsync", 1, js_renderer_vsync),
|
||||
JS_CFUNC_DEF("coords", 1, js_renderer_coords),
|
||||
JS_CFUNC_DEF("camera", 2, js_renderer_camera),
|
||||
JS_CFUNC_DEF("get_viewport", 0, js_renderer_get_viewport),
|
||||
JS_CFUNC_DEF("screen2world", 1, js_renderer_screen2world),
|
||||
JS_CFUNC_DEF("target", 1, js_renderer_target),
|
||||
JS_CFUNC_DEF("make_sprite_mesh",2, js_renderer_make_sprite_mesh),
|
||||
};
|
||||
|
||||
JSC_CCALL(mod_create,
|
||||
SDL_Window *win = js2SDL_Window(js,self);
|
||||
SDL_PropertiesID props = SDL_CreateProperties();
|
||||
SDL_SetNumberProperty(props, SDL_PROP_RENDERER_CREATE_PRESENT_VSYNC_NUMBER, 0);
|
||||
SDL_SetPointerProperty(props, SDL_PROP_RENDERER_CREATE_WINDOW_POINTER, win);
|
||||
SDL_SetStringProperty(props, SDL_PROP_RENDERER_CREATE_NAME_STRING, str);
|
||||
SDL_Renderer *r = SDL_CreateRendererWithProperties(props);
|
||||
SDL_DestroyProperties(props);
|
||||
if (!r) return JS_ThrowReferenceError(js, "Error creating renderer: %s",SDL_GetError());
|
||||
SDL_SetRenderDrawBlendMode(r, SDL_BLENDMODE_BLEND);
|
||||
return SDL_Renderer2js(js,r);
|
||||
)
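// Usage sketch (assumption): the script layer in sdl/video.ce reaches this through
// win.make_renderer(), which hands back the SDL_Renderer wrapper exposing the function
// list above (clear, present, texture, geometry, ...).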
|
||||
|
||||
static const JSCFunctionListEntry js_mod_funcs[] = {
|
||||
JS_CFUNC_DEF("create", 1, js_mod_create)
|
||||
};
|
||||
|
||||
CELL_USE_INIT(
|
||||
JSValue obj = JS_NewObject(ctx);
|
||||
|
||||
// Add all the above C functions as properties of that object
|
||||
JS_SetPropertyFunctionList(ctx, obj,
|
||||
js_renderer_funcs,
|
||||
sizeof(js_renderer_funcs)/sizeof(JSCFunctionListEntry));
|
||||
return obj;
|
||||
)
|
||||
1094
sdl/surface.c
1094
sdl/surface.c
File diff suppressed because it is too large
Load Diff
762
sdl/video.c
762
sdl/video.c
@@ -1,762 +0,0 @@
|
||||
#include "cell.h"
|
||||
#include "prosperon.h"
|
||||
|
||||
#include <SDL3/SDL.h>
|
||||
#include <SDL3/SDL_gpu.h>
|
||||
#include <SDL3/SDL_error.h>
|
||||
#include <SDL3/SDL_properties.h>
|
||||
#include <stdio.h>
|
||||
#include <string.h>
|
||||
#include <assert.h>
|
||||
#include "sdl.h"
|
||||
|
||||
// SDL Window free function
|
||||
void SDL_Window_free(JSRuntime *rt, SDL_Window *w)
|
||||
{
|
||||
SDL_DestroyWindow(w);
|
||||
}
|
||||
|
||||
QJSCLASS(SDL_Window,)
|
||||
|
||||
void SDL_Cursor_free(JSRuntime *rt, SDL_Cursor *c)
|
||||
{
|
||||
SDL_DestroyCursor(c);
|
||||
}
|
||||
|
||||
QJSCLASS(SDL_Cursor,)
|
||||
|
||||
// Forward declarations for blend mode helpers
|
||||
static JSValue blendmode2js(JSContext *js, SDL_BlendMode mode);
|
||||
static SDL_BlendMode js2blendmode(JSContext *js, JSValue v);
|
||||
|
||||
// Window constructor function
|
||||
static JSValue js_SDL_Window_constructor(JSContext *js, JSValueConst new_target, int argc, JSValueConst *argv)
|
||||
{
|
||||
SDL_Window *www = SDL_CreateWindow("prosperon", 500, 500, 0);
|
||||
return SDL_Window2js(js, www);
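// NOTE: this early return hands back a fixed 500x500 window and leaves the options-based
// construction below unreachable.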
|
||||
if (argc < 1 || !JS_IsObject(argv[0]))
|
||||
return JS_ThrowTypeError(js, "Window constructor requires an object argument");
|
||||
|
||||
JSValue opts = argv[0];
|
||||
|
||||
// Get basic properties (defaults are handled in JavaScript)
|
||||
const char *title = NULL;
|
||||
JSValue title_val = JS_GetPropertyStr(js, opts, "title");
|
||||
if (!JS_IsNull(title_val) && !JS_IsUndefined(title_val)) {
|
||||
title = JS_ToCString(js, title_val);
|
||||
}
|
||||
JS_FreeValue(js, title_val);
|
||||
|
||||
if (!title) {
|
||||
return JS_ThrowTypeError(js, "Window title is required");
|
||||
}
|
||||
|
||||
int width = 640;
|
||||
JSValue width_val = JS_GetPropertyStr(js, opts, "width");
|
||||
if (!JS_IsNull(width_val) && !JS_IsUndefined(width_val)) {
|
||||
width = js2number(js, width_val);
|
||||
}
|
||||
JS_FreeValue(js, width_val);
|
||||
|
||||
int height = 480;
|
||||
JSValue height_val = JS_GetPropertyStr(js, opts, "height");
|
||||
if (!JS_IsNull(height_val) && !JS_IsUndefined(height_val)) {
|
||||
height = js2number(js, height_val);
|
||||
}
|
||||
JS_FreeValue(js, height_val);
|
||||
|
||||
// Create SDL properties object
|
||||
SDL_PropertiesID props = SDL_CreateProperties();
|
||||
|
||||
// Always set basic properties
|
||||
SDL_SetNumberProperty(props, SDL_PROP_WINDOW_CREATE_WIDTH_NUMBER, width);
|
||||
SDL_SetNumberProperty(props, SDL_PROP_WINDOW_CREATE_HEIGHT_NUMBER, height);
|
||||
SDL_SetStringProperty(props, SDL_PROP_WINDOW_CREATE_TITLE_STRING, title);
|
||||
|
||||
// Handle window position
|
||||
JSValue x_val = JS_GetPropertyStr(js, opts, "x");
|
||||
if (!JS_IsNull(x_val)) {
|
||||
if (JS_IsString(x_val)) {
|
||||
const char *pos = JS_ToCString(js, x_val);
|
||||
if (strcmp(pos, "centered") == 0)
|
||||
SDL_SetNumberProperty(props, SDL_PROP_WINDOW_CREATE_X_NUMBER, SDL_WINDOWPOS_CENTERED);
|
||||
else
|
||||
SDL_SetNumberProperty(props, SDL_PROP_WINDOW_CREATE_X_NUMBER, SDL_WINDOWPOS_UNDEFINED);
|
||||
JS_FreeCString(js, pos);
|
||||
} else {
|
||||
SDL_SetNumberProperty(props, SDL_PROP_WINDOW_CREATE_X_NUMBER, js2number(js, x_val));
|
||||
}
|
||||
}
|
||||
JS_FreeValue(js, x_val);
|
||||
|
||||
JSValue y_val = JS_GetPropertyStr(js, opts, "y");
|
||||
if (!JS_IsNull(y_val)) {
|
||||
if (JS_IsString(y_val)) {
|
||||
const char *pos = JS_ToCString(js, y_val);
|
||||
if (strcmp(pos, "centered") == 0)
|
||||
SDL_SetNumberProperty(props, SDL_PROP_WINDOW_CREATE_Y_NUMBER, SDL_WINDOWPOS_CENTERED);
|
||||
else
|
||||
SDL_SetNumberProperty(props, SDL_PROP_WINDOW_CREATE_Y_NUMBER, SDL_WINDOWPOS_UNDEFINED);
|
||||
JS_FreeCString(js, pos);
|
||||
} else {
|
||||
SDL_SetNumberProperty(props, SDL_PROP_WINDOW_CREATE_Y_NUMBER, js2number(js, y_val));
|
||||
}
|
||||
}
|
||||
JS_FreeValue(js, y_val);
|
||||
|
||||
// Helper function to check and set boolean properties
|
||||
#define SET_BOOL_PROP(js_name, sdl_prop) do { \
|
||||
JSValue val = JS_GetPropertyStr(js, opts, js_name); \
|
||||
if (!JS_IsNull(val)) { \
|
||||
SDL_SetBooleanProperty(props, sdl_prop, JS_ToBool(js, val)); \
|
||||
} \
|
||||
JS_FreeValue(js, val); \
|
||||
} while(0)
|
||||
|
||||
// Set all boolean properties directly on the SDL properties object
|
||||
SET_BOOL_PROP("resizable", SDL_PROP_WINDOW_CREATE_RESIZABLE_BOOLEAN);
|
||||
SET_BOOL_PROP("fullscreen", SDL_PROP_WINDOW_CREATE_FULLSCREEN_BOOLEAN);
|
||||
SET_BOOL_PROP("hidden", SDL_PROP_WINDOW_CREATE_HIDDEN_BOOLEAN);
|
||||
SET_BOOL_PROP("borderless", SDL_PROP_WINDOW_CREATE_BORDERLESS_BOOLEAN);
|
||||
SET_BOOL_PROP("alwaysOnTop", SDL_PROP_WINDOW_CREATE_ALWAYS_ON_TOP_BOOLEAN);
|
||||
SET_BOOL_PROP("minimized", SDL_PROP_WINDOW_CREATE_MINIMIZED_BOOLEAN);
|
||||
SET_BOOL_PROP("maximized", SDL_PROP_WINDOW_CREATE_MAXIMIZED_BOOLEAN);
|
||||
SET_BOOL_PROP("mouseGrabbed", SDL_PROP_WINDOW_CREATE_MOUSE_GRABBED_BOOLEAN);
|
||||
SET_BOOL_PROP("highPixelDensity", SDL_PROP_WINDOW_CREATE_HIGH_PIXEL_DENSITY_BOOLEAN);
|
||||
SET_BOOL_PROP("transparent", SDL_PROP_WINDOW_CREATE_TRANSPARENT_BOOLEAN);
|
||||
SET_BOOL_PROP("utility", SDL_PROP_WINDOW_CREATE_UTILITY_BOOLEAN);
|
||||
SET_BOOL_PROP("tooltip", SDL_PROP_WINDOW_CREATE_TOOLTIP_BOOLEAN);
|
||||
SET_BOOL_PROP("popupMenu", SDL_PROP_WINDOW_CREATE_MENU_BOOLEAN);
|
||||
SET_BOOL_PROP("opengl", SDL_PROP_WINDOW_CREATE_OPENGL_BOOLEAN);
|
||||
SET_BOOL_PROP("vulkan", SDL_PROP_WINDOW_CREATE_VULKAN_BOOLEAN);
|
||||
SET_BOOL_PROP("metal", SDL_PROP_WINDOW_CREATE_METAL_BOOLEAN);
|
||||
SET_BOOL_PROP("modal", SDL_PROP_WINDOW_CREATE_MODAL_BOOLEAN);
|
||||
SET_BOOL_PROP("externalGraphicsContext", SDL_PROP_WINDOW_CREATE_EXTERNAL_GRAPHICS_CONTEXT_BOOLEAN);
|
||||
|
||||
// Handle focusable (inverse logic)
|
||||
JSValue focusable_val = JS_GetPropertyStr(js, opts, "focusable");
|
||||
if (!JS_IsNull(focusable_val)) {
|
||||
SDL_SetBooleanProperty(props, SDL_PROP_WINDOW_CREATE_FOCUSABLE_BOOLEAN, JS_ToBool(js, focusable_val));
|
||||
}
|
||||
JS_FreeValue(js, focusable_val);
|
||||
|
||||
// Handle notFocusable (for backwards compatibility)
|
||||
JSValue not_focusable_val = JS_GetPropertyStr(js, opts, "notFocusable");
|
||||
if (!JS_IsNull(not_focusable_val)) {
|
||||
SDL_SetBooleanProperty(props, SDL_PROP_WINDOW_CREATE_FOCUSABLE_BOOLEAN, !JS_ToBool(js, not_focusable_val));
|
||||
}
|
||||
JS_FreeValue(js, not_focusable_val);
|
||||
|
||||
#undef SET_BOOL_PROP
|
||||
|
||||
// Handle parent window
|
||||
JSValue parent_val = JS_GetPropertyStr(js, opts, "parent");
|
||||
if (!JS_IsNull(parent_val) && !JS_IsUndefined(parent_val)) {
|
||||
SDL_Window *parent = js2SDL_Window(js, parent_val);
|
||||
if (parent) {
|
||||
SDL_SetPointerProperty(props, SDL_PROP_WINDOW_CREATE_PARENT_POINTER, parent);
|
||||
}
|
||||
}
|
||||
JS_FreeValue(js, parent_val);
|
||||
|
||||
// Create window with properties
|
||||
SDL_Window *window = SDL_CreateWindowWithProperties(props);
|
||||
SDL_DestroyProperties(props);
|
||||
|
||||
// Always free the title string since we allocated it
|
||||
if (title) {
|
||||
JS_FreeCString(js, title);
|
||||
}
|
||||
|
||||
if (!window) {
|
||||
return JS_ThrowReferenceError(js, "Failed to create window: %s", SDL_GetError());
|
||||
}
|
||||
|
||||
// Create the window JS object
|
||||
JSValue window_obj = SDL_Window2js(js, window);
|
||||
|
||||
// Set additional properties that can't be set during creation
|
||||
// These will be applied through the property setters
|
||||
|
||||
JSValue opacity_val = JS_GetPropertyStr(js, opts, "opacity");
|
||||
if (!JS_IsNull(opacity_val)) {
|
||||
JS_SetPropertyStr(js, window_obj, "opacity", opacity_val);
|
||||
}
|
||||
|
||||
JSValue min_size_val = JS_GetPropertyStr(js, opts, "minimumSize");
|
||||
if (!JS_IsNull(min_size_val)) {
|
||||
JS_SetPropertyStr(js, window_obj, "minimumSize", min_size_val);
|
||||
}
|
||||
|
||||
JSValue max_size_val = JS_GetPropertyStr(js, opts, "maximumSize");
|
||||
if (!JS_IsNull(max_size_val)) {
|
||||
JS_SetPropertyStr(js, window_obj, "maximumSize", max_size_val);
|
||||
}
|
||||
|
||||
JSValue pos_val = JS_GetPropertyStr(js, opts, "position");
|
||||
if (!JS_IsNull(pos_val)) {
|
||||
JS_SetPropertyStr(js, window_obj, "position", pos_val);
|
||||
}
|
||||
|
||||
// Handle text input
|
||||
JSValue text_input = JS_GetPropertyStr(js, opts, "textInput");
|
||||
if (JS_ToBool(js, text_input)) {
|
||||
// SDL_StartTextInput(window);
|
||||
}
|
||||
JS_FreeValue(js, text_input);
|
||||
|
||||
printf("created window %p\n", window);
|
||||
|
||||
return window_obj;
|
||||
}
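/* Usage sketch from script (hedged; option names mirror the defaults in sdl/video.ce):
 *   var win = new video.window({ title: "game", width: 1280, height: 720,
 *                                resizable: true, highPixelDensity: true });
 */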
|
||||
|
||||
JSC_CCALL(SDL_Window_fullscreen,
|
||||
SDL_SetWindowFullscreen(js2SDL_Window(js,self), true)
|
||||
)
|
||||
|
||||
JSValue js_SDL_Window_keyboard_shown(JSContext *js, JSValue self, int argc, JSValue *argv) {
|
||||
SDL_Window *window = js2SDL_Window(js,self);
|
||||
return JS_NewBool(js,SDL_ScreenKeyboardShown(window));
|
||||
}
|
||||
|
||||
JSValue js_window_theme(JSContext *js, JSValue self, int argc, JSValue *argv)
|
||||
{
|
||||
return JS_NULL;
|
||||
}
|
||||
|
||||
JSValue js_window_safe_area(JSContext *js, JSValue self, int argc, JSValue *argv)
|
||||
{
|
||||
SDL_Window *w = js2SDL_Window(js,self);
|
||||
SDL_Rect r;
|
||||
SDL_GetWindowSafeArea(w, &r);
|
||||
rect newr;
|
||||
SDL_RectToFRect(&r, &newr);
|
||||
return rect2js(js,newr);
|
||||
}
|
||||
|
||||
JSValue js_window_bordered(JSContext *js, JSValue self, int argc, JSValue *argv)
|
||||
{
|
||||
SDL_Window *w = js2SDL_Window(js,self);
|
||||
SDL_SetWindowBordered(w, JS_ToBool(js,argv[0]));
|
||||
return JS_NULL;
|
||||
}
|
||||
|
||||
JSValue js_window_get_title(JSContext *js, JSValue self)
|
||||
{
|
||||
SDL_Window *w = js2SDL_Window(js,self);
|
||||
const char *title = SDL_GetWindowTitle(w);
|
||||
return JS_NewString(js,title);
|
||||
}
|
||||
|
||||
JSValue js_window_set_title(JSContext *js, JSValue self, JSValue val)
|
||||
{
|
||||
SDL_Window *w = js2SDL_Window(js,self);
|
||||
const char *title = JS_ToCString(js,val);
|
||||
SDL_SetWindowTitle(w,title);
|
||||
JS_FreeCString(js,title);
|
||||
return JS_NULL;
|
||||
}
|
||||
|
||||
JSValue js_window_get_size(JSContext *js, JSValue self)
|
||||
{
|
||||
SDL_Window *win = js2SDL_Window(js,self);
|
||||
int w, h;
|
||||
SDL_GetWindowSize(win, &w, &h);
|
||||
return vec22js(js, (HMM_Vec2){w,h});
|
||||
}
|
||||
|
||||
JSValue js_window_set_size(JSContext *js, JSValue self, JSValue val)
|
||||
{
|
||||
SDL_Window *w = js2SDL_Window(js,self);
|
||||
HMM_Vec2 size = js2vec2(js,val);
|
||||
SDL_SetWindowSize(w,size.x,size.y);
|
||||
return JS_NULL;
|
||||
}
|
||||
|
||||
JSValue js_window_set_icon(JSContext *js, JSValue self, int argc, JSValue *argv)
|
||||
{
|
||||
SDL_Window *w = js2SDL_Window(js,self);
|
||||
SDL_Surface *s = js2SDL_Surface(js,argv[0]);
|
||||
if (!SDL_SetWindowIcon(w,s))
|
||||
return JS_ThrowReferenceError(js, "could not set window icon: %s", SDL_GetError());
|
||||
return JS_NULL;
|
||||
}
|
||||
|
||||
// Position getter/setter
|
||||
JSValue js_window_get_position(JSContext *js, JSValue self)
|
||||
{
|
||||
SDL_Window *w = js2SDL_Window(js,self);
|
||||
int x, y;
|
||||
SDL_GetWindowPosition(w, &x, &y);
|
||||
return vec22js(js, (HMM_Vec2){x,y});
|
||||
}
|
||||
|
||||
JSValue js_window_set_position(JSContext *js, JSValue self, JSValue val)
|
||||
{
|
||||
SDL_Window *w = js2SDL_Window(js,self);
|
||||
HMM_Vec2 pos = js2vec2(js,val);
|
||||
SDL_SetWindowPosition(w,pos.x,pos.y);
|
||||
return JS_NULL;
|
||||
}
|
||||
|
||||
// Mouse grab getter/setter
|
||||
JSValue js_window_get_mouseGrab(JSContext *js, JSValue self)
|
||||
{
|
||||
SDL_Window *w = js2SDL_Window(js,self);
|
||||
return JS_NewBool(js, SDL_GetWindowMouseGrab(w));
|
||||
}
|
||||
|
||||
JSValue js_window_set_mouseGrab(JSContext *js, JSValue self, JSValue val)
|
||||
{
|
||||
SDL_Window *w = js2SDL_Window(js,self);
|
||||
SDL_SetWindowMouseGrab(w, JS_ToBool(js,val));
|
||||
return JS_NULL;
|
||||
}
|
||||
|
||||
// Keyboard grab getter/setter
|
||||
JSValue js_window_get_keyboardGrab(JSContext *js, JSValue self)
|
||||
{
|
||||
SDL_Window *w = js2SDL_Window(js,self);
|
||||
return JS_NewBool(js, SDL_GetWindowKeyboardGrab(w));
|
||||
}
|
||||
|
||||
JSValue js_window_set_keyboardGrab(JSContext *js, JSValue self, JSValue val)
|
||||
{
|
||||
SDL_Window *w = js2SDL_Window(js,self);
|
||||
SDL_SetWindowKeyboardGrab(w, JS_ToBool(js,val));
|
||||
return JS_NULL;
|
||||
}
|
||||
|
||||
// Opacity getter/setter
|
||||
JSValue js_window_get_opacity(JSContext *js, JSValue self)
|
||||
{
|
||||
SDL_Window *w = js2SDL_Window(js,self);
|
||||
return number2js(js, SDL_GetWindowOpacity(w));
|
||||
}
|
||||
|
||||
JSValue js_window_set_opacity(JSContext *js, JSValue self, JSValue val)
|
||||
{
|
||||
SDL_Window *w = js2SDL_Window(js,self);
|
||||
float opacity = js2number(js,val);
|
||||
SDL_SetWindowOpacity(w, opacity);
|
||||
return JS_NULL;
|
||||
}
|
||||
|
||||
// Minimum size getter/setter
|
||||
JSValue js_window_get_minimumSize(JSContext *js, JSValue self)
|
||||
{
|
||||
SDL_Window *w = js2SDL_Window(js,self);
|
||||
int width, height;
|
||||
SDL_GetWindowMinimumSize(w, &width, &height);
|
||||
return vec22js(js, (HMM_Vec2){width,height});
|
||||
}
|
||||
|
||||
JSValue js_window_set_minimumSize(JSContext *js, JSValue self, JSValue val)
|
||||
{
|
||||
SDL_Window *w = js2SDL_Window(js,self);
|
||||
HMM_Vec2 size = js2vec2(js,val);
|
||||
SDL_SetWindowMinimumSize(w,size.x,size.y);
|
||||
return JS_NULL;
|
||||
}
|
||||
|
||||
// Maximum size getter/setter
|
||||
JSValue js_window_get_maximumSize(JSContext *js, JSValue self)
|
||||
{
|
||||
SDL_Window *w = js2SDL_Window(js,self);
|
||||
int width, height;
|
||||
SDL_GetWindowMaximumSize(w, &width, &height);
|
||||
return vec22js(js, (HMM_Vec2){width,height});
|
||||
}
|
||||
|
||||
JSValue js_window_set_maximumSize(JSContext *js, JSValue self, JSValue val)
|
||||
{
|
||||
SDL_Window *w = js2SDL_Window(js,self);
|
||||
HMM_Vec2 size = js2vec2(js,val);
|
||||
SDL_SetWindowMaximumSize(w,size.x,size.y);
|
||||
return JS_NULL;
|
||||
}
|
||||
|
||||
// Resizable setter (read from flags)
|
||||
JSValue js_window_get_resizable(JSContext *js, JSValue self)
|
||||
{
|
||||
SDL_Window *w = js2SDL_Window(js,self);
|
||||
SDL_WindowFlags flags = SDL_GetWindowFlags(w);
|
||||
return JS_NewBool(js, flags & SDL_WINDOW_RESIZABLE);
|
||||
}
|
||||
|
||||
JSValue js_window_set_resizable(JSContext *js, JSValue self, JSValue val)
|
||||
{
|
||||
SDL_Window *w = js2SDL_Window(js,self);
|
||||
SDL_SetWindowResizable(w, JS_ToBool(js,val));
|
||||
return JS_NULL;
|
||||
}
|
||||
|
||||
// Bordered getter/setter
|
||||
JSValue js_window_get_bordered(JSContext *js, JSValue self)
|
||||
{
|
||||
SDL_Window *w = js2SDL_Window(js,self);
|
||||
SDL_WindowFlags flags = SDL_GetWindowFlags(w);
|
||||
return JS_NewBool(js, !(flags & SDL_WINDOW_BORDERLESS));
|
||||
}
|
||||
|
||||
JSValue js_window_set_bordered(JSContext *js, JSValue self, JSValue val)
|
||||
{
|
||||
SDL_Window *w = js2SDL_Window(js,self);
|
||||
SDL_SetWindowBordered(w, JS_ToBool(js,val));
|
||||
return JS_NULL;
|
||||
}
|
||||
|
||||
// Always on top getter/setter
|
||||
JSValue js_window_get_alwaysOnTop(JSContext *js, JSValue self)
|
||||
{
|
||||
SDL_Window *w = js2SDL_Window(js,self);
|
||||
SDL_WindowFlags flags = SDL_GetWindowFlags(w);
|
||||
return JS_NewBool(js, flags & SDL_WINDOW_ALWAYS_ON_TOP);
|
||||
}
|
||||
|
||||
JSValue js_window_set_alwaysOnTop(JSContext *js, JSValue self, JSValue val)
|
||||
{
|
||||
SDL_Window *w = js2SDL_Window(js,self);
|
||||
SDL_SetWindowAlwaysOnTop(w, JS_ToBool(js,val));
|
||||
return JS_NULL;
|
||||
}
|
||||
|
||||
// Fullscreen getter/setter
|
||||
JSValue js_window_get_fullscreen(JSContext *js, JSValue self)
|
||||
{
|
||||
SDL_Window *w = js2SDL_Window(js,self);
|
||||
SDL_WindowFlags flags = SDL_GetWindowFlags(w);
|
||||
return JS_NewBool(js, flags & SDL_WINDOW_FULLSCREEN);
|
||||
}
|
||||
|
||||
JSValue js_window_set_fullscreen(JSContext *js, JSValue self, JSValue val)
|
||||
{
|
||||
SDL_Window *w = js2SDL_Window(js,self);
|
||||
SDL_SetWindowFullscreen(w, JS_ToBool(js,val));
|
||||
return JS_NULL;
|
||||
}
|
||||
|
||||
// Focusable setter
|
||||
JSValue js_window_get_focusable(JSContext *js, JSValue self)
|
||||
{
|
||||
SDL_Window *w = js2SDL_Window(js,self);
|
||||
SDL_WindowFlags flags = SDL_GetWindowFlags(w);
|
||||
return JS_NewBool(js, !(flags & SDL_WINDOW_NOT_FOCUSABLE));
|
||||
}
|
||||
|
||||
JSValue js_window_set_focusable(JSContext *js, JSValue self, JSValue val)
|
||||
{
|
||||
SDL_Window *w = js2SDL_Window(js,self);
|
||||
SDL_SetWindowFocusable(w, JS_ToBool(js,val));
|
||||
return JS_NULL;
|
||||
}
|
||||
|
||||
// Modal setter
|
||||
JSValue js_window_get_modal(JSContext *js, JSValue self)
|
||||
{
|
||||
SDL_Window *w = js2SDL_Window(js,self);
|
||||
SDL_WindowFlags flags = SDL_GetWindowFlags(w);
|
||||
return JS_NewBool(js, flags & SDL_WINDOW_MODAL);
|
||||
}
|
||||
|
||||
JSValue js_window_set_modal(JSContext *js, JSValue self, JSValue val)
|
||||
{
|
||||
SDL_Window *w = js2SDL_Window(js,self);
|
||||
SDL_SetWindowModal(w, JS_ToBool(js,val));
|
||||
return JS_NULL;
|
||||
}
|
||||
|
||||
// Hidden/visible state
|
||||
JSValue js_window_get_visible(JSContext *js, JSValue self)
|
||||
{
|
||||
SDL_Window *w = js2SDL_Window(js,self);
|
||||
SDL_WindowFlags flags = SDL_GetWindowFlags(w);
|
||||
return JS_NewBool(js, !(flags & SDL_WINDOW_HIDDEN));
|
||||
}
|
||||
|
||||
JSValue js_window_set_visible(JSContext *js, JSValue self, JSValue val)
|
||||
{
|
||||
SDL_Window *w = js2SDL_Window(js,self);
|
||||
if (JS_ToBool(js,val))
|
||||
SDL_ShowWindow(w);
|
||||
else
|
||||
SDL_HideWindow(w);
|
||||
return JS_NULL;
|
||||
}
|
||||
|
||||
// Minimized state
|
||||
JSValue js_window_get_minimized(JSContext *js, JSValue self)
|
||||
{
|
||||
SDL_Window *w = js2SDL_Window(js,self);
|
||||
SDL_WindowFlags flags = SDL_GetWindowFlags(w);
|
||||
return JS_NewBool(js, flags & SDL_WINDOW_MINIMIZED);
|
||||
}
|
||||
|
||||
JSValue js_window_set_minimized(JSContext *js, JSValue self, JSValue val)
|
||||
{
|
||||
SDL_Window *w = js2SDL_Window(js,self);
|
||||
if (JS_ToBool(js,val))
|
||||
SDL_MinimizeWindow(w);
|
||||
else
|
||||
SDL_RestoreWindow(w);
|
||||
return JS_NULL;
|
||||
}
|
||||
|
||||
// Maximized state
|
||||
JSValue js_window_get_maximized(JSContext *js, JSValue self)
|
||||
{
|
||||
SDL_Window *w = js2SDL_Window(js,self);
|
||||
SDL_WindowFlags flags = SDL_GetWindowFlags(w);
|
||||
return JS_NewBool(js, flags & SDL_WINDOW_MAXIMIZED);
|
||||
}
|
||||
|
||||
JSValue js_window_set_maximized(JSContext *js, JSValue self, JSValue val)
|
||||
{
|
||||
SDL_Window *w = js2SDL_Window(js,self);
|
||||
if (JS_ToBool(js,val))
|
||||
SDL_MaximizeWindow(w);
|
||||
else
|
||||
SDL_RestoreWindow(w);
|
||||
return JS_NULL;
|
||||
}
|
||||
|
||||
// Other window methods
|
||||
JSValue js_window_raise(JSContext *js, JSValue self, int argc, JSValue *argv)
|
||||
{
|
||||
SDL_Window *w = js2SDL_Window(js,self);
|
||||
SDL_RaiseWindow(w);
|
||||
return JS_NULL;
|
||||
}
|
||||
|
||||
JSValue js_window_restore(JSContext *js, JSValue self, int argc, JSValue *argv)
|
||||
{
|
||||
SDL_Window *w = js2SDL_Window(js,self);
|
||||
SDL_RestoreWindow(w);
|
||||
return JS_NULL;
|
||||
}
|
||||
|
||||
JSValue js_window_flash(JSContext *js, JSValue self, int argc, JSValue *argv)
|
||||
{
|
||||
SDL_Window *w = js2SDL_Window(js,self);
|
||||
SDL_FlashOperation op = SDL_FLASH_BRIEFLY;
|
||||
if (argc > 0 && JS_IsString(argv[0])) {
|
||||
const char *operation = JS_ToCString(js,argv[0]);
|
||||
if (strcmp(operation, "cancel") == 0) op = SDL_FLASH_CANCEL;
|
||||
else if (strcmp(operation, "briefly") == 0) op = SDL_FLASH_BRIEFLY;
|
||||
else if (strcmp(operation, "until_focused") == 0) op = SDL_FLASH_UNTIL_FOCUSED;
|
||||
JS_FreeCString(js,operation);
|
||||
}
|
||||
SDL_FlashWindow(w, op);
|
||||
return JS_NULL;
|
||||
}
|
||||
|
||||
JSValue js_window_destroy(JSContext *js, JSValue self, int argc, JSValue *argv)
|
||||
{
|
||||
SDL_Window *w = js2SDL_Window(js,self);
|
||||
SDL_DestroyWindow(w);
|
||||
return JS_NULL;
|
||||
}
|
||||
|
||||
JSValue js_window_get_id(JSContext *js, JSValue self)
|
||||
{
|
||||
SDL_Window *w = js2SDL_Window(js,self);
|
||||
return number2js(js, SDL_GetWindowID(w));
|
||||
}
|
||||
|
||||
JSValue js_window_get_parent(JSContext *js, JSValue self)
|
||||
{
|
||||
SDL_Window *w = js2SDL_Window(js,self);
|
||||
SDL_Window *parent = SDL_GetWindowParent(w);
|
||||
if (!parent) return JS_NULL;
|
||||
return SDL_Window2js(js, parent);
|
||||
}
|
||||
|
||||
JSValue js_window_set_parent(JSContext *js, JSValue self, JSValue val)
|
||||
{
|
||||
SDL_Window *w = js2SDL_Window(js,self);
|
||||
SDL_Window *parent = NULL;
|
||||
if (!JS_IsNull(val) && !JS_IsUndefined(val))
|
||||
parent = js2SDL_Window(js,val);
|
||||
SDL_SetWindowParent(w, parent);
|
||||
return JS_NULL;
|
||||
}
|
||||
|
||||
JSValue js_window_get_pixelDensity(JSContext *js, JSValue self)
|
||||
{
|
||||
SDL_Window *w = js2SDL_Window(js,self);
|
||||
return number2js(js, SDL_GetWindowPixelDensity(w));
|
||||
}
|
||||
|
||||
JSValue js_window_get_displayScale(JSContext *js, JSValue self)
|
||||
{
|
||||
SDL_Window *w = js2SDL_Window(js,self);
|
||||
return number2js(js, SDL_GetWindowDisplayScale(w));
|
||||
}
|
||||
|
||||
JSValue js_window_get_sizeInPixels(JSContext *js, JSValue self)
|
||||
{
|
||||
SDL_Window *w = js2SDL_Window(js,self);
|
||||
int width, height;
|
||||
SDL_GetWindowSizeInPixels(w, &width, &height);
|
||||
return vec22js(js, (HMM_Vec2){width,height});
|
||||
}
|
||||
|
||||
// Surface related
|
||||
JSValue js_window_get_surface(JSContext *js, JSValue self)
|
||||
{
|
||||
SDL_Window *w = js2SDL_Window(js,self);
|
||||
SDL_Surface *surf = SDL_GetWindowSurface(w);
|
||||
if (!surf) return JS_NULL;
|
||||
return SDL_Surface2js(js, surf);
|
||||
}
|
||||
|
||||
JSValue js_window_updateSurface(JSContext *js, JSValue self, int argc, JSValue *argv)
|
||||
{
|
||||
SDL_Window *w = js2SDL_Window(js,self);
|
||||
if (!SDL_UpdateWindowSurface(w))
|
||||
return JS_ThrowReferenceError(js, "Failed to update window surface: %s", SDL_GetError());
|
||||
return JS_NULL;
|
||||
}
|
||||
|
||||
JSValue js_window_updateSurfaceRects(JSContext *js, JSValue self, int argc, JSValue *argv)
|
||||
{
|
||||
SDL_Window *w = js2SDL_Window(js,self);
|
||||
|
||||
if (!JS_IsArray(js, argv[0]))
|
||||
return JS_ThrowTypeError(js, "Expected array of rectangles");
|
||||
|
||||
int len = JS_ArrayLength(js, argv[0]);
|
||||
SDL_Rect rects[len];
|
||||
|
||||
for (int i = 0; i < len; i++) {
|
||||
JSValue val = JS_GetPropertyUint32(js, argv[0], i);
|
||||
rect r = js2rect(js, val);
|
||||
rects[i] = (SDL_Rect){r.x, r.y, r.w, r.h};
|
||||
JS_FreeValue(js, val);
|
||||
}
|
||||
|
||||
if (!SDL_UpdateWindowSurfaceRects(w, rects, len))
|
||||
return JS_ThrowReferenceError(js, "Failed to update window surface rects: %s", SDL_GetError());
|
||||
return JS_NULL;
|
||||
}
|
||||
|
||||
JSValue js_window_get_flags(JSContext *js, JSValue self)
|
||||
{
|
||||
SDL_Window *w = js2SDL_Window(js,self);
|
||||
SDL_WindowFlags flags = SDL_GetWindowFlags(w);
|
||||
|
||||
JSValue ret = JS_NewObject(js);
|
||||
JS_SetPropertyStr(js, ret, "fullscreen", JS_NewBool(js, flags & SDL_WINDOW_FULLSCREEN));
|
||||
JS_SetPropertyStr(js, ret, "opengl", JS_NewBool(js, flags & SDL_WINDOW_OPENGL));
|
||||
JS_SetPropertyStr(js, ret, "occluded", JS_NewBool(js, flags & SDL_WINDOW_OCCLUDED));
|
||||
JS_SetPropertyStr(js, ret, "hidden", JS_NewBool(js, flags & SDL_WINDOW_HIDDEN));
|
||||
JS_SetPropertyStr(js, ret, "borderless", JS_NewBool(js, flags & SDL_WINDOW_BORDERLESS));
|
||||
JS_SetPropertyStr(js, ret, "resizable", JS_NewBool(js, flags & SDL_WINDOW_RESIZABLE));
|
||||
JS_SetPropertyStr(js, ret, "minimized", JS_NewBool(js, flags & SDL_WINDOW_MINIMIZED));
|
||||
JS_SetPropertyStr(js, ret, "maximized", JS_NewBool(js, flags & SDL_WINDOW_MAXIMIZED));
|
||||
JS_SetPropertyStr(js, ret, "mouseGrabbed", JS_NewBool(js, flags & SDL_WINDOW_MOUSE_GRABBED));
|
||||
JS_SetPropertyStr(js, ret, "inputFocus", JS_NewBool(js, flags & SDL_WINDOW_INPUT_FOCUS));
|
||||
JS_SetPropertyStr(js, ret, "mouseFocus", JS_NewBool(js, flags & SDL_WINDOW_MOUSE_FOCUS));
|
||||
JS_SetPropertyStr(js, ret, "external", JS_NewBool(js, flags & SDL_WINDOW_EXTERNAL));
|
||||
JS_SetPropertyStr(js, ret, "modal", JS_NewBool(js, flags & SDL_WINDOW_MODAL));
|
||||
JS_SetPropertyStr(js, ret, "highPixelDensity", JS_NewBool(js, flags & SDL_WINDOW_HIGH_PIXEL_DENSITY));
|
||||
JS_SetPropertyStr(js, ret, "mouseCapture", JS_NewBool(js, flags & SDL_WINDOW_MOUSE_CAPTURE));
|
||||
JS_SetPropertyStr(js, ret, "mouseRelativeMode", JS_NewBool(js, flags & SDL_WINDOW_MOUSE_RELATIVE_MODE));
|
||||
JS_SetPropertyStr(js, ret, "alwaysOnTop", JS_NewBool(js, flags & SDL_WINDOW_ALWAYS_ON_TOP));
|
||||
JS_SetPropertyStr(js, ret, "utility", JS_NewBool(js, flags & SDL_WINDOW_UTILITY));
|
||||
JS_SetPropertyStr(js, ret, "tooltip", JS_NewBool(js, flags & SDL_WINDOW_TOOLTIP));
|
||||
JS_SetPropertyStr(js, ret, "popupMenu", JS_NewBool(js, flags & SDL_WINDOW_POPUP_MENU));
|
||||
JS_SetPropertyStr(js, ret, "keyboardGrabbed", JS_NewBool(js, flags & SDL_WINDOW_KEYBOARD_GRABBED));
|
||||
JS_SetPropertyStr(js, ret, "vulkan", JS_NewBool(js, flags & SDL_WINDOW_VULKAN));
|
||||
JS_SetPropertyStr(js, ret, "metal", JS_NewBool(js, flags & SDL_WINDOW_METAL));
|
||||
JS_SetPropertyStr(js, ret, "transparent", JS_NewBool(js, flags & SDL_WINDOW_TRANSPARENT));
|
||||
JS_SetPropertyStr(js, ret, "notFocusable", JS_NewBool(js, flags & SDL_WINDOW_NOT_FOCUSABLE));
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
JSValue js_window_sync(JSContext *js, JSValue self, int argc, JSValue *argv)
|
||||
{
|
||||
SDL_Window *w = js2SDL_Window(js,self);
|
||||
SDL_SyncWindow(w);
|
||||
return JS_NULL;
|
||||
}
|
||||
|
||||
static const JSCFunctionListEntry js_SDL_Window_funcs[] = {
|
||||
MIST_FUNC_DEF(SDL_Window, fullscreen, 0),
|
||||
MIST_FUNC_DEF(SDL_Window, keyboard_shown, 0),
|
||||
MIST_FUNC_DEF(window, theme, 0),
|
||||
MIST_FUNC_DEF(window, safe_area, 0),
|
||||
MIST_FUNC_DEF(window, set_icon, 1),
|
||||
MIST_FUNC_DEF(window, raise, 0),
|
||||
MIST_FUNC_DEF(window, restore, 0),
|
||||
MIST_FUNC_DEF(window, flash, 1),
|
||||
MIST_FUNC_DEF(window, destroy, 0),
|
||||
MIST_FUNC_DEF(window, sync, 0),
|
||||
CGETSET_ADD(window, title),
|
||||
CGETSET_ADD(window, size),
|
||||
CGETSET_ADD(window, position),
|
||||
CGETSET_ADD(window, mouseGrab),
|
||||
CGETSET_ADD(window, keyboardGrab),
|
||||
CGETSET_ADD(window, opacity),
|
||||
CGETSET_ADD(window, minimumSize),
|
||||
CGETSET_ADD(window, maximumSize),
|
||||
CGETSET_ADD(window, resizable),
|
||||
CGETSET_ADD(window, bordered),
|
||||
CGETSET_ADD(window, alwaysOnTop),
|
||||
CGETSET_ADD(window, fullscreen),
|
||||
CGETSET_ADD(window, focusable),
|
||||
CGETSET_ADD(window, modal),
|
||||
CGETSET_ADD(window, visible),
|
||||
CGETSET_ADD(window, minimized),
|
||||
CGETSET_ADD(window, maximized),
|
||||
CGETSET_ADD(window, parent),
|
||||
JS_CGETSET_DEF("id", js_window_get_id, NULL),
|
||||
JS_CGETSET_DEF("pixelDensity", js_window_get_pixelDensity, NULL),
|
||||
JS_CGETSET_DEF("displayScale", js_window_get_displayScale, NULL),
|
||||
JS_CGETSET_DEF("sizeInPixels", js_window_get_sizeInPixels, NULL),
|
||||
JS_CGETSET_DEF("flags", js_window_get_flags, NULL),
|
||||
JS_CGETSET_DEF("surface", js_window_get_surface, NULL),
|
||||
MIST_FUNC_DEF(window, updateSurface, 0),
|
||||
MIST_FUNC_DEF(window, updateSurfaceRects, 1),
|
||||
};
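// From script these bind as properties and methods on the window object, e.g.
// win.title = "paused", win.size, win.fullscreen(), win.flash("until_focused").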
|
||||
|
||||
// Cursor creation function
|
||||
JSC_CCALL(sdl_create_cursor,
|
||||
SDL_Surface *surf = js2SDL_Surface(js, argv[0]);
|
||||
if (!surf) return JS_ThrowReferenceError(js, "Invalid surface");
|
||||
|
||||
HMM_Vec2 hot = {0, 0};
|
||||
if (argc > 1) hot = js2vec2(js, argv[1]);
|
||||
|
||||
SDL_Cursor *cursor = SDL_CreateColorCursor(surf, hot.x, hot.y);
|
||||
if (!cursor) return JS_ThrowReferenceError(js, "Failed to create cursor: %s", SDL_GetError());
|
||||
|
||||
return SDL_Cursor2js(js, cursor);
|
||||
)
|
||||
|
||||
// Set cursor function
|
||||
JSC_CCALL(sdl_set_cursor,
|
||||
SDL_Cursor *cursor = js2SDL_Cursor(js, argv[0]);
|
||||
|
||||
if (!cursor) return JS_ThrowReferenceError(js, "Invalid cursor");
|
||||
|
||||
SDL_SetCursor(cursor);
|
||||
)
|
||||
|
||||
CELL_USE_INIT(
|
||||
if (!SDL_Init(SDL_INIT_VIDEO))
|
||||
return JS_ThrowInternalError(js, "Unable to initialize video subsystem: %s", SDL_GetError());
|
||||
|
||||
JSValue ret = JS_NewObject(js);
|
||||
|
||||
JS_SetPropertyStr(js, ret, "window", QJSCLASSPREP_FUNCS_CTOR(SDL_Window, 1));
|
||||
|
||||
QJSCLASSPREP_NO_FUNCS(SDL_Cursor);
|
||||
|
||||
// Add cursor functions
|
||||
JS_SetPropertyStr(js, ret, "createCursor", JS_NewCFunction(js, js_sdl_create_cursor, "createCursor", 2));
|
||||
JS_SetPropertyStr(js, ret, "setCursor", JS_NewCFunction(js, js_sdl_set_cursor, "setCursor", 1));
|
||||
|
||||
return ret;
|
||||
)
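// Script side (sketch; see sdl/video.ce): var video = use('sdl/video');
//   new video.window({...}), video.createCursor(surface, [hx, hy]), video.setCursor(cursor)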
|
||||
867
sdl/video.ce
867
sdl/video.ce
@@ -1,867 +0,0 @@
|
||||
var video = use('sdl/video');
|
||||
var imgui = use('imgui');
|
||||
|
||||
log.console("BAD")
|
||||
|
||||
// SDL Video Actor
|
||||
// This actor runs on the main thread and handles all SDL video operations
|
||||
var surface = use('sdl/surface');
|
||||
var input = use('input')
|
||||
|
||||
var ren
|
||||
var win
|
||||
|
||||
var default_window = {
|
||||
// Basic properties
|
||||
title: "Prosperon Window",
|
||||
width: 640,
|
||||
height: 480,
|
||||
|
||||
// Position - can be numbers or "centered"
|
||||
x: null, // SDL_WINDOWPOS_UNDEFINED by default
|
||||
y: null, // SDL_WINDOWPOS_UNDEFINED by default
|
||||
|
||||
// Window behavior flags
|
||||
resizable: true,
|
||||
fullscreen: false,
|
||||
hidden: false,
|
||||
borderless: false,
|
||||
alwaysOnTop: false,
|
||||
minimized: false,
|
||||
maximized: false,
|
||||
|
||||
// Input grabbing
|
||||
mouseGrabbed: false,
|
||||
keyboardGrabbed: false,
|
||||
|
||||
// Display properties
|
||||
highPixelDensity: false,
|
||||
transparent: false,
|
||||
opacity: 1.0, // 0.0 to 1.0
|
||||
|
||||
// Focus behavior
|
||||
notFocusable: false,
|
||||
|
||||
// Special window types (mutually exclusive)
|
||||
utility: false, // Utility window (not in taskbar)
|
||||
tooltip: false, // Tooltip window (requires parent)
|
||||
popupMenu: false, // Popup menu window (requires parent)
|
||||
|
||||
// Graphics API flags (let SDL choose if not specified)
|
||||
opengl: false, // Force OpenGL context
|
||||
vulkan: false, // Force Vulkan context
|
||||
metal: false, // Force Metal context (macOS)
|
||||
|
||||
// Advanced properties
|
||||
parent: null, // Parent window for tooltips/popups/modal
|
||||
modal: false, // Modal to parent window (requires parent)
|
||||
externalGraphicsContext: false, // Use external graphics context
|
||||
|
||||
// Input handling
|
||||
textInput: true, // Enable text input on creation
|
||||
};
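// The first spawn argument (arg[0]) overrides any of these defaults, e.g.
// { title: "editor", width: 1280, height: 720, borderless: true }.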
|
||||
|
||||
var config = object(default_window, arg[0] || {})
|
||||
win = new video.window(config);
|
||||
|
||||
// Resource tracking
|
||||
var resources = {
|
||||
texture: {},
surface: {},
cursor: {},
// window and renderer maps are indexed by the texture, mouse and keyboard handlers below
window: {},
renderer: {}
|
||||
};
|
||||
|
||||
// ID counter for resource allocation
|
||||
var next_id = 1;
|
||||
|
||||
// Helper to allocate new ID
|
||||
function allocate_id() {
|
||||
return next_id++;
|
||||
}
|
||||
|
||||
// Message handler
|
||||
$receiver(function(msg) {
|
||||
if (!msg.kind || !msg.op) {
|
||||
send(msg, {error: "Message must have 'kind' and 'op' fields"});
|
||||
return;
|
||||
}
|
||||
|
||||
var response = {};
|
||||
|
||||
// log.console(msg)
|
||||
|
||||
try {
|
||||
switch (msg.kind) {
|
||||
case 'window':
|
||||
response = handle_window(msg);
|
||||
break;
|
||||
case 'renderer':
|
||||
response = handle_renderer(msg);
|
||||
break;
|
||||
case 'texture':
|
||||
response = handle_texture(msg);
|
||||
break;
|
||||
case 'surface':
|
||||
response = handle_surface(msg);
|
||||
break;
|
||||
case 'cursor':
|
||||
response = handle_cursor(msg);
|
||||
break;
|
||||
case 'mouse':
|
||||
response = handle_mouse(msg);
|
||||
break;
|
||||
case 'keyboard':
|
||||
response = handle_keyboard(msg);
|
||||
break;
|
||||
case 'imgui':
|
||||
response = handle_imgui(msg);
|
||||
break;
|
||||
case 'input':
|
||||
response = input.get_events();
|
||||
// Filter and transform events
|
||||
if (ren && isa(response, array)) {
|
||||
var filteredEvents = [];
|
||||
var wantMouse = imgui.wantmouse();
|
||||
var wantKeys = imgui.wantkeys();
|
||||
|
||||
for (var i = 0; i < response.length; i++) {
|
||||
var event = response[i];
|
||||
var shouldInclude = true;
|
||||
|
||||
// Filter mouse events if ImGui wants mouse input
|
||||
if (wantMouse && (event.type == 'mouse_motion' ||
|
||||
event.type == 'mouse_button_down' ||
|
||||
event.type == 'mouse_button_up' ||
|
||||
event.type == 'mouse_wheel')) {
|
||||
shouldInclude = false;
|
||||
}
|
||||
|
||||
// Filter keyboard events if ImGui wants keyboard input
|
||||
if (wantKeys && (event.type == 'key_down' ||
|
||||
event.type == 'key_up' ||
|
||||
event.type == 'text_input' ||
|
||||
event.type == 'text_editing')) {
|
||||
shouldInclude = false;
|
||||
}
|
||||
|
||||
if (shouldInclude) {
|
||||
// Transform mouse coordinates from window to renderer coordinates
|
||||
if (event.pos && (event.type == 'mouse_motion' ||
|
||||
event.type == 'mouse_button_down' ||
|
||||
event.type == 'mouse_button_up' ||
|
||||
event.type == 'mouse_wheel')) {
|
||||
// Convert window coordinates to renderer logical coordinates
|
||||
var logicalPos = ren.coordsFromWindow(event.pos);
|
||||
event.pos = logicalPos;
|
||||
}
|
||||
// Handle drop events which also have position
|
||||
if (event.pos && (event.type == 'drop_file' ||
|
||||
event.type == 'drop_text' ||
|
||||
event.type == 'drop_position')) {
|
||||
var logicalPos = ren.coordsFromWindow(event.pos);
|
||||
event.pos = logicalPos;
|
||||
}
|
||||
|
||||
filteredEvents.push(event);
|
||||
}
|
||||
}
|
||||
|
||||
response = filteredEvents;
|
||||
}
|
||||
break;
|
||||
default:
|
||||
response = {error: "Unknown kind: " + msg.kind};
|
||||
}
|
||||
} catch (e) {
|
||||
response = {error: e.toString()};
|
||||
log.error(e)
|
||||
}
|
||||
|
||||
send(msg, response);
|
||||
});
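// Example messages (sketch; field names taken from the handlers below):
//   { kind: 'window',   op: 'set',   data: { property: 'title', value: 'paused' } }
//   { kind: 'renderer', op: 'clear' }
//   { kind: 'input',    op: 'poll' }   // op is ignored for 'input'; returns filtered events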
|
||||
|
||||
// Window operations
|
||||
function handle_window(msg) {
|
||||
switch (msg.op) {
|
||||
case 'destroy':
|
||||
win.destroy();
|
||||
win = null
|
||||
return {success: true};
|
||||
|
||||
case 'show':
|
||||
win.visible = true;
|
||||
return {success: true};
|
||||
|
||||
case 'hide':
|
||||
win.visible = false;
|
||||
return {success: true};
|
||||
|
||||
case 'get':
|
||||
var prop = msg.data ? msg.data.property : null;
|
||||
if (!prop) return {error: "Missing property name"};
|
||||
|
||||
// Handle special cases
|
||||
if (prop == 'surface') {
|
||||
var surf = win.surface;
|
||||
if (!surf) return {data: null};
|
||||
var surf_id = allocate_id();
|
||||
resources.surface[surf_id] = surf;
|
||||
return {data: surf_id};
|
||||
}
|
||||
|
||||
return {data: win[prop]};
|
||||
|
||||
case 'set':
|
||||
var prop = msg.data ? msg.data.property : null;
|
||||
var value = msg.data ? msg.data.value : null;
|
||||
if (!prop) return {error: "Missing property name"};
|
||||
|
||||
// Validate property is settable
|
||||
var readonly = ['id', 'pixelDensity', 'displayScale', 'sizeInPixels', 'flags', 'surface'];
|
||||
if (readonly.indexOf(prop) != -1) {
|
||||
return {error: "Property '" + prop + "' is read-only"};
|
||||
}
|
||||
|
||||
win[prop] = value;
|
||||
return {success: true};
|
||||
|
||||
case 'fullscreen':
|
||||
win.fullscreen();
|
||||
return {success: true};
|
||||
|
||||
case 'updateSurface':
|
||||
win.updateSurface();
|
||||
return {success: true};
|
||||
|
||||
case 'updateSurfaceRects':
|
||||
if (!msg.data || !msg.data.rects) return {error: "Missing rects array"};
|
||||
win.updateSurfaceRects(msg.data.rects);
|
||||
return {success: true};
|
||||
|
||||
case 'raise':
|
||||
win.raise();
|
||||
return {success: true};
|
||||
|
||||
case 'restore':
|
||||
win.restore();
|
||||
return {success: true};
|
||||
|
||||
case 'flash':
|
||||
win.flash(msg.data ? msg.data.operation : 'briefly');
|
||||
return {success: true};
|
||||
|
||||
case 'sync':
|
||||
win.sync();
|
||||
return {success: true};
|
||||
|
||||
case 'setIcon':
|
||||
if (!msg.data || !msg.data.surface_id) return {error: "Missing surface_id"};
|
||||
var surf = resources.surface[msg.data.surface_id];
|
||||
if (!surf) return {error: "Invalid surface id"};
|
||||
win.set_icon(surf);
|
||||
return {success: true};
|
||||
|
||||
case 'makeRenderer':
|
||||
log.console("MAKE RENDERER")
|
||||
if (ren)
|
||||
return {reason: "Already made a renderer"}
|
||||
ren = win.make_renderer()
|
||||
// Initialize ImGui with the window and renderer
|
||||
imgui.init(win, ren);
|
||||
imgui.newframe()
|
||||
return {success:true};
|
||||
|
||||
default:
|
||||
return {error: "Unknown window operation: " + msg.op};
|
||||
}
|
||||
}
|
||||
|
||||
// Renderer operation functions
|
||||
var renderfuncs = {
|
||||
destroy: function(msg) {
|
||||
ren = null
|
||||
return {success: true};
|
||||
},
|
||||
|
||||
clear: function(msg) {
|
||||
ren.clear();
|
||||
return {success: true};
|
||||
},
|
||||
|
||||
present: function(msg) {
|
||||
ren.present();
|
||||
return {success: true};
|
||||
},
|
||||
|
||||
flush: function(msg) {
|
||||
ren.flush();
|
||||
return {success: true};
|
||||
},
|
||||
|
||||
get: function(msg) {
|
||||
var prop = msg.data ? msg.data.property : null;
|
||||
if (!prop) return {error: "Missing property name"};
|
||||
|
||||
// Handle special getters that might return objects
|
||||
if (prop == 'drawColor') {
|
||||
var color = ren[prop];
|
||||
if (color && typeof color == 'object') {
|
||||
// Convert color object to array format [r,g,b,a]
|
||||
return {data: [color.r || 0, color.g || 0, color.b || 0, color.a || 255]};
|
||||
}
|
||||
}
|
||||
|
||||
return {data: ren[prop]};
|
||||
},
|
||||
|
||||
set: function(msg) {
|
||||
var prop = msg.prop
|
||||
var value = msg.value
|
||||
if (!prop) return {error: "Missing property name"};
|
||||
|
||||
if (!value) return {error: "No value to set"}
|
||||
|
||||
// Validate property is settable
|
||||
var readonly = ['window', 'name', 'outputSize', 'currentOutputSize', 'logicalPresentationRect', 'safeArea'];
|
||||
if (readonly.indexOf(prop) != -1) {
|
||||
return {error: "Property '" + prop + "' is read-only"};
|
||||
}
|
||||
|
||||
// Special handling for render target
|
||||
if (prop == 'target' && value != null) {
|
||||
var tex = resources.texture[value];
|
||||
if (!tex) return {error: "Invalid texture id"};
|
||||
value = tex;
|
||||
}
|
||||
|
||||
ren[prop] = value;
|
||||
return {success: true};
|
||||
},
|
||||
|
||||
line: function(msg) {
|
||||
if (!msg.data || !msg.data.points) return {error: "Missing points array"};
|
||||
ren.line(msg.data.points);
|
||||
return {success: true};
|
||||
},
|
||||
|
||||
point: function(msg) {
|
||||
if (!msg.data || !msg.data.points) return {error: "Missing points"};
|
||||
ren.point(msg.data.points);
|
||||
return {success: true};
|
||||
},
|
||||
|
||||
rect: function(msg) {
|
||||
if (!msg.data || !msg.data.rect) return {error: "Missing rect"};
|
||||
ren.rect(msg.data.rect);
|
||||
return {success: true};
|
||||
},
|
||||
|
||||
fillRect: function(msg) {
|
||||
if (!msg.data || !msg.data.rect) return {error: "Missing rect"};
|
||||
ren.fillRect(msg.data.rect);
|
||||
return {success: true};
|
||||
},
|
||||
|
||||
rects: function(msg) {
|
||||
if (!msg.data || !msg.data.rects) return {error: "Missing rects"};
|
||||
ren.rects(msg.data.rects);
|
||||
return {success: true};
|
||||
},
|
||||
|
||||
lineTo: function(msg) {
|
||||
if (!msg.data || !msg.data.a || !msg.data.b) return {error: "Missing points a and b"};
|
||||
ren.lineTo(msg.data.a, msg.data.b);
|
||||
return {success: true};
|
||||
},
|
||||
|
||||
texture: function(msg) {
|
||||
if (!msg.data) return {error: "Missing texture data"};
|
||||
var tex_id = msg.data.texture_id;
|
||||
if (!tex_id || !resources.texture[tex_id]) return {error: "Invalid texture id"};
|
||||
ren.texture(
|
||||
resources.texture[tex_id],
|
||||
msg.data.src,
|
||||
msg.data.dst,
|
||||
msg.data.angle || 0,
|
||||
msg.data.anchor || {x:0.5, y:0.5}
|
||||
);
|
||||
return {success: true};
|
||||
},
|
||||
|
||||
copyTexture: function(msg) {
|
||||
if (!msg.data) return {error: "Missing texture data"};
|
||||
var tex_id = msg.data.texture_id;
|
||||
if (!tex_id || !resources.texture[tex_id]) return {error: "Invalid texture id"};
|
||||
var tex = resources.texture[tex_id];
|
||||
|
||||
// Use the texture method with normalized coordinates
|
||||
ren.texture(
|
||||
tex,
|
||||
msg.data.src || {x:0, y:0, width:tex.width, height:tex.height},
|
||||
msg.data.dest || {x:0, y:0, width:tex.width, height:tex.height},
|
||||
0, // No rotation
|
||||
{x:0, y:0} // Top-left anchor
|
||||
);
|
||||
return {success: true};
|
||||
},
|
||||
|
||||
sprite: function(msg) {
|
||||
if (!msg.data || !msg.data.sprite) return {error: "Missing sprite data"};
|
||||
ren.sprite(msg.data.sprite);
|
||||
return {success: true};
|
||||
},
|
||||
|
||||
geometry: function(msg) {
|
||||
if (!msg.data) return {error: "Missing geometry data"};
|
||||
var tex_id = msg.data.texture_id;
|
||||
var tex = tex_id ? resources.texture[tex_id] : null;
|
||||
ren.geometry(tex, msg.data.geometry);
|
||||
return {success: true};
|
||||
},
|
||||
|
||||
geometry_raw: function geometry_raw(msg) {
|
||||
var geom = msg.data
|
||||
ren.geometry_raw(resources.texture[geom.texture_id], geom.xy, geom.xy_stride, geom.color, geom.color_stride, geom.uv, geom.uv_stride, geom.num_vertices, geom.indices, geom.num_indices, geom.size_indices);
return {success: true};
},
|
||||
|
||||
debugText: function(msg) {
|
||||
if (!msg.data || !msg.data.text) return {error: "Missing text"};
|
||||
ren.debugText([msg.data.pos.x, msg.data.pos.y], msg.data.text);
|
||||
return {success: true};
|
||||
},
|
||||
|
||||
clipEnabled: function(msg) {
|
||||
return {data: ren.clipEnabled()};
|
||||
},
|
||||
|
||||
texture9Grid: function(msg) {
|
||||
if (!msg.data) return {error: "Missing data"};
|
||||
var tex_id = msg.data.texture_id;
|
||||
if (!tex_id || !resources.texture[tex_id]) return {error: "Invalid texture id"};
|
||||
ren.texture9Grid(
|
||||
resources.texture[tex_id],
|
||||
msg.data.src,
|
||||
msg.data.leftWidth,
|
||||
msg.data.rightWidth,
|
||||
msg.data.topHeight,
|
||||
msg.data.bottomHeight,
|
||||
msg.data.scale,
|
||||
msg.data.dst
|
||||
);
|
||||
return {success: true};
|
||||
},
|
||||
|
||||
textureTiled: function(msg) {
|
||||
if (!msg.data) return {error: "Missing data"};
|
||||
var tex_id = msg.data.texture_id;
|
||||
if (!tex_id || !resources.texture[tex_id]) return {error: "Invalid texture id"};
|
||||
ren.textureTiled(
|
||||
resources.texture[tex_id],
|
||||
msg.data.src,
|
||||
msg.data.scale || 1.0,
|
||||
msg.data.dst
|
||||
);
|
||||
return {success: true};
|
||||
},
|
||||
|
||||
readPixels: function(msg) {
|
||||
var surf = ren.readPixels(msg.data ? msg.data.rect : null);
|
||||
if (!surf) return {error: "Failed to read pixels"};
|
||||
var surf_id = allocate_id();
|
||||
resources.surface[surf_id] = surf;
|
||||
return {id: surf_id};
|
||||
},
|
||||
|
||||
loadTexture: function(msg) {
|
||||
if (!msg.data) throw new Error("Missing data")
|
||||
|
||||
var tex;
|
||||
// Direct surface data
|
||||
var surf = new surface(msg.data)
|
||||
|
||||
if (!surf)
|
||||
throw new Error("Must provide surface_id or surface data")
|
||||
|
||||
tex = ren.load_texture(surf);
|
||||
|
||||
if (!tex) throw new Error("Failed to load texture")
|
||||
|
||||
// Set pixel mode to nearest for all textures
|
||||
tex.scaleMode = "nearest"
|
||||
|
||||
var tex_id = allocate_id();
|
||||
resources.texture[tex_id] = tex;
|
||||
return {
|
||||
id: tex_id,
|
||||
};
|
||||
},
|
||||
|
||||
coordsFromWindow: function(msg) {
|
||||
if (!msg.data || !msg.data.pos) return {error: "Missing pos"};
|
||||
return {data: ren.coordsFromWindow(msg.data.pos)};
|
||||
},
|
||||
|
||||
coordsToWindow: function(msg) {
|
||||
if (!msg.data || !msg.data.pos) return {error: "Missing pos"};
|
||||
return {data: ren.coordsToWindow(msg.data.pos)};
|
||||
},
|
||||
|
||||
batch: function(msg) {
|
||||
if (!msg.data || !isa(msg.data, array)) return {error: "Missing or invalid data array"};
|
||||
|
||||
for (var i = 0; i < msg.data.length; i++)
|
||||
handle_renderer(msg.data[i]);
|
||||
|
||||
return {success:true};
|
||||
},
|
||||
|
||||
imgui_render: function(msg) {
|
||||
imgui.endframe(ren);
|
||||
imgui.newframe()
|
||||
return {success: true};
|
||||
}
|
||||
};
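// A 'batch' op replays several renderer ops in one message (sketch; rect fields assumed):
//   { kind: 'renderer', op: 'batch', data: [ { op: 'clear' },
//     { op: 'fillRect', data: { rect: { x: 0, y: 0, w: 32, h: 32 } } },
//     { op: 'present' } ] }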
|
||||
|
||||
// Renderer operations
|
||||
function handle_renderer(msg) {
|
||||
if (!ren) return {reason: 'no renderer!'}
|
||||
|
||||
var func = renderfuncs[msg.op];
|
||||
if (func) {
|
||||
return func(msg);
|
||||
} else {
|
||||
return {error: "Unknown renderer operation: " + msg.op};
|
||||
}
|
||||
}
|
||||
|
||||
// Texture operations
|
||||
function handle_texture(msg) {
|
||||
// Special case: create needs a renderer
|
||||
if (msg.op == 'create') {
|
||||
if (!msg.data) return {error: "Missing texture data"};
|
||||
var ren_id = msg.data.renderer_id;
|
||||
if (!ren_id || !resources.renderer[ren_id]) return {error: "Invalid renderer id"};
|
||||
|
||||
var tex;
|
||||
var renderer = resources.renderer[ren_id];
|
||||
|
||||
// Create from surface
|
||||
if (msg.data.surface_id) {
|
||||
var surf = resources.surface[msg.data.surface_id];
|
||||
if (!surf) return {error: "Invalid surface id"};
|
||||
tex = new video.texture(renderer, surf);
|
||||
}
|
||||
// Create from properties
|
||||
else if (msg.data.width && msg.data.height) {
|
||||
tex = new video.texture(renderer, {
|
||||
width: msg.data.width,
|
||||
height: msg.data.height,
|
||||
format: msg.data.format || 'rgba8888',
|
||||
pixels: msg.data.pixels,
|
||||
pitch: msg.data.pitch
|
||||
});
|
||||
}
|
||||
else {
|
||||
log.console(msg.data)
|
||||
return {error: "Must provide either surface_id or width/height"};
|
||||
}
|
||||
|
||||
// Set pixel mode to nearest for all textures
|
||||
tex.scaleMode = "nearest"
|
||||
|
||||
var tex_id = allocate_id();
|
||||
resources.texture[tex_id] = tex;
|
||||
return {id: tex_id, data: {size: tex.size}};
|
||||
}
|
||||
|
||||
// All other operations require a valid texture ID
|
||||
if (!msg.id || !resources.texture[msg.id]) {
|
||||
return {error: "Invalid texture id: " + msg.id};
|
||||
}
|
||||
|
||||
var tex = resources.texture[msg.id];
|
||||
|
||||
switch (msg.op) {
|
||||
case 'destroy':
|
||||
delete resources.texture[msg.id];
|
||||
// Texture is automatically destroyed when all references are gone
|
||||
return {success: true};
|
||||
|
||||
case 'get':
|
||||
var prop = msg.data ? msg.data.property : null;
|
||||
if (!prop) return {error: "Missing property name"};
|
||||
return {data: tex[prop]};
|
||||
|
||||
case 'set':
|
||||
var prop = msg.data ? msg.data.property : null;
|
||||
var value = msg.data ? msg.data.value : null;
|
||||
if (!prop) return {error: "Missing property name"};
|
||||
|
||||
// Validate property is settable
|
||||
var readonly = ['size', 'width', 'height'];
|
||||
if (readonly.indexOf(prop) != -1) {
|
||||
return {error: "Property '" + prop + "' is read-only"};
|
||||
}
|
||||
|
||||
tex[prop] = value;
|
||||
return {success: true};
|
||||
|
||||
case 'update':
|
||||
if (!msg.data) return {error: "Missing update data"};
|
||||
tex.update(
|
||||
msg.data.rect || null,
|
||||
msg.data.pixels,
|
||||
msg.data.pitch || 0
|
||||
);
|
||||
return {success: true};
|
||||
|
||||
case 'lock':
|
||||
var result = tex.lock(msg.data ? msg.data.rect : null);
|
||||
return {data: result};
|
||||
|
||||
case 'unlock':
|
||||
tex.unlock();
|
||||
return {success: true};
|
||||
|
||||
case 'query':
|
||||
return {data: tex.query()};
|
||||
|
||||
default:
|
||||
return {error: "Unknown texture operation: " + msg.op};
|
||||
}
|
||||
}
|
||||
|
||||
// Surface operations (mainly for cleanup)
|
||||
function handle_surface(msg) {
|
||||
switch (msg.op) {
|
||||
case 'destroy':
|
||||
if (!msg.id || !resources.surface[msg.id]) {
|
||||
return {error: "Invalid surface id: " + msg.id};
|
||||
}
|
||||
delete resources.surface[msg.id];
|
||||
return {success: true};
|
||||
|
||||
default:
|
||||
return {error: "Unknown surface operation: " + msg.op};
|
||||
}
|
||||
}
|
||||
|
||||
// Cursor operations
|
||||
function handle_cursor(msg) {
|
||||
switch (msg.op) {
|
||||
case 'create':
|
||||
var surf = new surface(msg.data)
|
||||
|
||||
var hotspot = msg.data.hotspot || [0, 0];
|
||||
var cursor = video.createCursor(surf, hotspot);
|
||||
|
||||
var cursor_id = allocate_id();
|
||||
resources.cursor[cursor_id] = cursor;
|
||||
return {id: cursor_id};
|
||||
|
||||
case 'set':
|
||||
var cursor = null;
|
||||
if (msg.id && resources.cursor[msg.id]) {
|
||||
cursor = resources.cursor[msg.id];
|
||||
}
|
||||
video.setCursor(cursor);
|
||||
return {success: true};
|
||||
|
||||
case 'destroy':
|
||||
if (!msg.id || !resources.cursor[msg.id]) {
|
||||
return {error: "Invalid cursor id: " + msg.id};
|
||||
}
|
||||
delete resources.cursor[msg.id];
|
||||
return {success: true};
|
||||
|
||||
default:
|
||||
return {error: "Unknown cursor operation: " + msg.op};
|
||||
}
|
||||
}
|
||||
|
||||
// Utility function to create window and renderer
|
||||
prosperon.endowments = prosperon.endowments || {};
|
||||
|
||||
// Mouse operations
|
||||
function handle_mouse(msg) {
|
||||
var mouse = video.mouse;
|
||||
|
||||
switch (msg.op) {
|
||||
case 'show':
|
||||
if (msg.data == null) return {error: "Missing show parameter"};
|
||||
mouse.show(msg.data);
|
||||
return {success: true};
|
||||
|
||||
case 'capture':
|
||||
if (msg.data == null) return {error: "Missing capture parameter"};
|
||||
mouse.capture(msg.data);
|
||||
return {success: true};
|
||||
|
||||
case 'get_state':
|
||||
return {data: mouse.get_state()};
|
||||
|
||||
case 'get_global_state':
|
||||
return {data: mouse.get_global_state()};
|
||||
|
||||
case 'get_relative_state':
|
||||
return {data: mouse.get_relative_state()};
|
||||
|
||||
case 'warp_global':
|
||||
if (!msg.data) return {error: "Missing position"};
|
||||
mouse.warp_global(msg.data);
|
||||
return {success: true};
|
||||
|
||||
case 'warp_in_window':
|
||||
if (!msg.data || !msg.data.window_id || !msg.data.pos)
|
||||
return {error: "Missing window_id or position"};
|
||||
var window = resources.window[msg.data.window_id];
|
||||
if (!window) return {error: "Invalid window id"};
|
||||
mouse.warp_in_window(window, msg.data.pos);
|
||||
return {success: true};
|
||||
|
||||
case 'cursor_visible':
|
||||
return {data: mouse.cursor_visible()};
|
||||
|
||||
case 'get_cursor':
|
||||
var cursor = mouse.get_cursor();
|
||||
if (!cursor) return {data: null};
|
||||
// Find or create cursor ID
|
||||
for (var id in resources.cursor) {
|
||||
if (resources.cursor[id] == cursor) {
|
||||
return {data: id};
|
||||
}
|
||||
}
|
||||
// Not tracked, add it
|
||||
var cursor_id = allocate_id();
|
||||
resources.cursor[cursor_id] = cursor;
|
||||
return {data: cursor_id};
|
||||
|
||||
case 'get_default_cursor':
|
||||
var cursor = mouse.get_default_cursor();
|
||||
if (!cursor) return {data: null};
|
||||
// Find or create cursor ID
|
||||
for (var id in resources.cursor) {
|
||||
if (resources.cursor[id] == cursor) {
|
||||
return {data: id};
|
||||
}
|
||||
}
|
||||
// Not tracked, add it
|
||||
var cursor_id = allocate_id();
|
||||
resources.cursor[cursor_id] = cursor;
|
||||
return {data: cursor_id};
|
||||
|
||||
case 'create_system_cursor':
|
||||
if (msg.data == null) return {error: "Missing cursor type"};
|
||||
var cursor = mouse.create_system_cursor(msg.data);
|
||||
var cursor_id = allocate_id();
|
||||
resources.cursor[cursor_id] = cursor;
|
||||
return {id: cursor_id};
|
||||
|
||||
case 'get_focus':
|
||||
var window = mouse.get_focus();
|
||||
if (!window) return {data: null};
|
||||
// Find window ID
|
||||
for (var id in resources.window) {
|
||||
if (resources.window[id] == window) {
|
||||
return {data: id};
|
||||
}
|
||||
}
|
||||
// Not tracked, add it
|
||||
var win_id = allocate_id();
|
||||
resources.window[win_id] = window;
|
||||
return {data: win_id};
|
||||
|
||||
default:
|
||||
return {error: "Unknown mouse operation: " + msg.op};
|
||||
}
|
||||
}
|
||||
|
||||
// Keyboard operations
|
||||
function handle_keyboard(msg) {
|
||||
var keyboard = video.keyboard;
|
||||
|
||||
switch (msg.op) {
|
||||
case 'get_state':
|
||||
return {data: keyboard.get_state()};
|
||||
|
||||
case 'get_focus':
|
||||
var window = keyboard.get_focus();
|
||||
if (!window) return {data: null};
|
||||
// Find window ID
|
||||
for (var id in resources.window) {
|
||||
if (resources.window[id] == window) {
|
||||
return {data: id};
|
||||
}
|
||||
}
|
||||
// Not tracked, add it
|
||||
var win_id = allocate_id();
|
||||
resources.window[win_id] = window;
|
||||
return {data: win_id};
|
||||
|
||||
case 'start_text_input':
|
||||
var window = null;
|
||||
if (msg.data && msg.data.window_id) {
|
||||
window = resources.window[msg.data.window_id];
|
||||
if (!window) return {error: "Invalid window id"};
|
||||
}
|
||||
keyboard.start_text_input(window);
|
||||
return {success: true};
|
||||
|
||||
case 'stop_text_input':
|
||||
var window = null;
|
||||
if (msg.data && msg.data.window_id) {
|
||||
window = resources.window[msg.data.window_id];
|
||||
if (!window) return {error: "Invalid window id"};
|
||||
}
|
||||
keyboard.stop_text_input(window);
|
||||
return {success: true};
|
||||
|
||||
case 'text_input_active':
|
||||
var window = null;
|
||||
if (msg.data && msg.data.window_id) {
|
||||
window = resources.window[msg.data.window_id];
|
||||
if (!window) return {error: "Invalid window id"};
|
||||
}
|
||||
return {data: keyboard.text_input_active(window)};
|
||||
|
||||
case 'get_text_input_area':
|
||||
var window = null;
|
||||
if (msg.data && msg.data.window_id) {
|
||||
window = resources.window[msg.data.window_id];
|
||||
if (!window) return {error: "Invalid window id"};
|
||||
}
|
||||
return {data: keyboard.get_text_input_area(window)};
|
||||
|
||||
case 'set_text_input_area':
|
||||
if (!msg.data || !msg.data.rect) return {error: "Missing rect"};
|
||||
var window = null;
|
||||
if (msg.data.window_id) {
|
||||
window = resources.window[msg.data.window_id];
|
||||
if (!window) return {error: "Invalid window id"};
|
||||
}
|
||||
keyboard.set_text_input_area(msg.data.rect, msg.data.cursor || 0, window);
|
||||
return {success: true};
|
||||
|
||||
case 'clear_composition':
|
||||
var window = null;
|
||||
if (msg.data && msg.data.window_id) {
|
||||
window = resources.window[msg.data.window_id];
|
||||
if (!window) return {error: "Invalid window id"};
|
||||
}
|
||||
keyboard.clear_composition(window);
|
||||
return {success: true};
|
||||
|
||||
case 'screen_keyboard_shown':
|
||||
if (!msg.data || !msg.data.window_id) return {error: "Missing window_id"};
|
||||
var window = resources.window[msg.data.window_id];
|
||||
if (!window) return {error: "Invalid window id"};
|
||||
return {data: keyboard.screen_keyboard_shown(window)};
|
||||
|
||||
case 'reset':
|
||||
keyboard.reset();
|
||||
return {success: true};
|
||||
|
||||
default:
|
||||
return {error: "Unknown keyboard operation: " + msg.op};
|
||||
}
|
||||
}
|
||||
232
sound.cm
232
sound.cm
@@ -1,25 +1,8 @@
/*
 * sound.cm - Audio playback system
 * sound.cm - Audio playback system for Prosperon
 *
 * OBJECTS:
 *
 * PCM - Decoded audio data (cached per file)
 *   .pcm         - stoned blob of f32 stereo samples at OUTPUT_RATE
 *   .channels    - original channel count (before conversion)
 *   .sample_rate - original sample rate (before conversion)
 *   .frames      - total frames in pcm blob
 *   .file        - source file path
 *
 * Voice - A playing instance of a PCM
 *   .source      - reference to PCM object
 *   .pos         - current frame position (0-indexed)
 *   .vol         - volume 0.0-1.0 (default 1.0)
 *   .loop        - if true, loops when reaching end
 *   .stopped     - set to true to stop playback
 *   .finish_hook - optional callback when voice finishes
 *
 * LPF state - Persistent filter state for output
 *   .cutoff      - 0.0-1.0 normalized frequency (higher = less filtering)
 * Uses the soundwave package for audio decoding and mixing,
 * and routes output to SDL audio.
 *
 * USAGE:
 *   var audio = use('sound')
@@ -29,175 +12,47 @@
 * voice.stopped = true // stop it
 */

var io = use('cellfs')
var res = use('resources')
var wav = use('audio/wav')
var mp3 = use('audio/mp3')
var flac = use('audio/flac')
var dsp = use('audio/dsp')
var samplerate = use('libsamplerate/convert') // resample(blob, src_rate, dst_rate, channels)
var Blob = use('blob')
var io = use('cellfs')
var res = use('resources')
var soundwave = use('soundwave/soundwave')
var sdl_audio = use('sdl3/audio')

var audio = {}
var pcms = {}

// Output format constants
var OUTPUT_RATE = 44100
var OUTPUT_RATE = 44100
var OUTPUT_CHANNELS = 2
var FRAMES_PER_CHUNK = 1024
var BYTES_PER_SAMPLE = 4 // f32
var CHUNK_BYTES = FRAMES_PER_CHUNK * OUTPUT_CHANNELS * BYTES_PER_SAMPLE

// LPF settings for master output
audio.lpf = { cutoff: 0.3, channels: OUTPUT_CHANNELS }
// Create the audio player instance
var player = soundwave.create({
  sample_rate: OUTPUT_RATE,
  channels: OUTPUT_CHANNELS,
  frames_per_chunk: FRAMES_PER_CHUNK
})

// Keep every live voice here so GC can't collect it prematurely
var voices = []

// Convert decoded audio to output format (stereo f32 at OUTPUT_RATE)
function normalize_pcm(decoded) {
  var pcm = decoded.pcm
  var channels = decoded.channels || 1
  var rate = decoded.sample_rate || OUTPUT_RATE

  // Resample if needed
  if (rate != OUTPUT_RATE) {
    pcm = samplerate.resample(pcm, rate, OUTPUT_RATE, channels)
  }

  // Convert mono to stereo if needed
  if (channels == 1) {
    pcm = dsp.mono_to_stereo(pcm)
    channels = 2
  }

  // Calculate frames (stereo f32)
  var bytes = pcm.length / 8 // blob.length is in bits
  var frames = bytes / (OUTPUT_CHANNELS * BYTES_PER_SAMPLE)

  return {
    pcm: pcm,
    channels: OUTPUT_CHANNELS,
    sample_rate: OUTPUT_RATE,
    frames: frames,
    file: decoded.file
  }
}

// Load and cache PCM data
// Load and cache PCM data from a file path
audio.pcm = function pcm(file) {
  file = res.find_sound(file)
  if (!file) return null
  if (pcms[file]) return pcms[file]

  // Check player's cache first
  if (player.pcm_cache[file]) return player.pcm_cache[file]

  var buf = io.slurp(file)
  if (!buf) return null

  var decoded = null
  if (file.endsWith('.wav')) {
    decoded = wav.decode(buf)
  } else if (file.endsWith('.mp3')) {
    decoded = mp3.decode(buf)
  } else if (file.endsWith('.flac')) {
    decoded = flac.decode(buf)
  }

  if (decoded && decoded.pcm) {
    decoded.file = file
    var normalized = normalize_pcm(decoded)
    return pcms[file] = normalized
  }
  return null
}

// Pull a chunk of audio from a voice, handling looping
// Returns a stoned blob of stereo f32 samples, or null if voice is done
function pull_voice_chunk(voice, frames) {
  if (voice.stopped) return null

  var source = voice.source
  var total_frames = source.frames
  var pos = voice.pos
  var bytes_per_frame = OUTPUT_CHANNELS * BYTES_PER_SAMPLE
  var bits_per_frame = bytes_per_frame * 8

  // Create output blob
  var out = new Blob()
  var frames_written = 0

  while (frames_written < frames) {
    if (pos >= total_frames) {
      if (voice.loop) {
        pos = 0
      } else {
        // Voice is done - pad with silence
        break
      }
    }

    // How many frames can we read from current position?
    var frames_available = total_frames - pos
    var frames_needed = frames - frames_written
    var frames_to_read = frames_available < frames_needed ? frames_available : frames_needed

    // Read from source PCM blob
    var start_bit = pos * bits_per_frame
    var end_bit = (pos + frames_to_read) * bits_per_frame
    var chunk = source.pcm.read_blob(start_bit, end_bit)
    out.write_blob(chunk)

    pos += frames_to_read
    frames_written += frames_to_read
  }

  voice.pos = pos

  // Pad with silence if we didn't fill the buffer
  if (frames_written < frames) {
    var silence_frames = frames - frames_written
    var silence = dsp.silence(silence_frames, OUTPUT_CHANNELS)
    out.write_blob(silence)
  }

  stone(out)
  return out
}

// Remove finished voices
function cleanup() {
  var active = []
  for (var i = 0; i < voices.length; i++) {
    var v = voices[i]
    var done = v.stopped || (!v.loop && v.pos >= v.source.frames)
    if (!done) {
      active.push(v)
    } else if (v.finish_hook) {
      v.finish_hook()
    }
  }
  voices = active

  return player.decode(buf, file)
}

// Play a sound file, returns voice object
audio.play = function play(file, opts) {
  var pcm_data = audio.pcm(file)
  if (!pcm_data) return null

  var voice = {
    source: pcm_data,
    pos: 0,
    vol: 1.0,
    loop: false,
    stopped: false,
    finish_hook: null
  }

  if (opts) {
    if (opts.loop != null) voice.loop = opts.loop
    if (opts.vol != null) voice.vol = opts.vol
  }

  voices.push(voice)
  return voice

  return player.play(pcm_data, opts)
}

// Convenience: play and return stop function
@@ -207,45 +62,22 @@ audio.cry = function cry(file) {
  return function() { v.stopped = true }
}

// Get number of active voices
audio.voice_count = function() {
  return player.voice_count()
}

// SDL audio stream setup
var ss = use('sdl/audio')
var feeder = ss.open_stream("playback")
var feeder = sdl_audio.open_stream("playback")
feeder.resume_device()

// Audio pump - called periodically to fill the audio buffer
function pump() {
  while (feeder.queued() < CHUNK_BYTES * 3) {
    // Collect chunks from all active voices
    var blobs = []
    var vols = []

    for (var i = 0; i < voices.length; i++) {
      var v = voices[i]
      if (v.stopped) continue
      var chunk = pull_voice_chunk(v, FRAMES_PER_CHUNK)
      if (chunk) {
        blobs.push(chunk)
        vols.push(v.vol)
      }
    }

    // Mix all voice chunks
    var mixed
    if (blobs.length == 0) {
      mixed = dsp.silence(FRAMES_PER_CHUNK, OUTPUT_CHANNELS)
    } else {
      mixed = dsp.mix_blobs(blobs, vols)
    }

    // Apply master LPF
    // var filtered = dsp.lpf(mixed, audio.lpf)

    // Send to audio device
    var mixed = player.pull(FRAMES_PER_CHUNK)
    feeder.put(mixed)

    cleanup()
  }

  $delay(pump, 1/240)
}
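A short usage sketch of the sound module above, assembled from its header comment and the functions shown in the diff; the file paths here are placeholders, not files from the repository.

var audio = use('sound')

// Play looped background music at half volume; play() returns a voice object.
var music = audio.play('music/theme.mp3', {loop: true, vol: 0.5})

// Fire-and-forget one-shot: cry() returns a function that stops the voice it started.
var stop_jump = audio.cry('sfx/jump.wav')

// Stop playback later by flagging the voice.
if (music) music.stopped = true
stop_jump()

// Number of voices the soundwave player is currently mixing.
var n = audio.voice_count()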
58
staef.c
@@ -3,8 +3,6 @@
#include "HandmadeMath.h"
#include "stb_ds.h"

#include "sdl.h"
#include <SDL3/SDL.h>
#include <string.h>

#include <ctype.h>
@@ -13,9 +11,6 @@
#include <stdlib.h>
#include <string.h>

#include "stb_image_write.h"
#include "stb_rect_pack.h"

#define STB_TRUETYPE_IMPLEMENTATION
#define STB_TRUETYPE_NO_STDIO
#include "stb_truetype.h"
@@ -63,7 +58,8 @@ struct sFont {
  float linegap; //pixels
  float line_height; // pixels
  struct character Characters[256];
  SDL_Surface *surface;
  unsigned char *pixels; // RGBA8 pixel data
  int atlas_size; // width and height of atlas (square)
};

typedef struct sFont font;
@@ -77,7 +73,7 @@ struct sFont *use_font;

void font_free(JSRuntime *rt, font *f)
{
  if (f->surface) SDL_DestroySurface(f->surface);
  if (f->pixels) free(f->pixels);
  free(f);
}

@@ -104,7 +100,6 @@ struct sFont *MakeFont(void *ttf_buffer, size_t len, int height) {

  stbtt_fontinfo fontinfo;
  if (!stbtt_InitFont(&fontinfo, ttf_buffer, stbtt_GetFontOffsetForIndex(ttf_buffer, 0))) {
    // YughError("Failed to make font %s", fontfile);
  }

  int ascent, descent, linegap;
@@ -115,12 +110,17 @@ struct sFont *MakeFont(void *ttf_buffer, size_t len, int height) {
  newfont->descent = descent * s; /* descent is negative */
  newfont->linegap = linegap * s;
  newfont->line_height = (newfont->ascent - newfont->descent) + newfont->linegap;
  newfont->surface = SDL_CreateSurface(packsize,packsize, SDL_PIXELFORMAT_RGBA32);
  if (!newfont->surface) printf("SDL ERROR: %s\n", SDL_GetError());
  for (int i = 0; i < packsize; i++)
    for (int j = 0; j < packsize; j++)
      if (!SDL_WriteSurfacePixel(newfont->surface, j, i, 255,255,255,bitmap[i*packsize+j]))
        printf("SDLERROR: %s\n", SDL_GetError());
  newfont->atlas_size = packsize;
  newfont->pixels = malloc(packsize * packsize * 4); // RGBA8
  for (int i = 0; i < packsize; i++) {
    for (int j = 0; j < packsize; j++) {
      int idx = (i * packsize + j) * 4;
      newfont->pixels[idx + 0] = 255; // R
      newfont->pixels[idx + 1] = 255; // G
      newfont->pixels[idx + 2] = 255; // B
      newfont->pixels[idx + 3] = bitmap[i * packsize + j]; // A
    }
  }

  for (unsigned char c = 32; c < 127; c++) {
    stbtt_packedchar glyph = glyphs[c - 32];
@@ -392,29 +392,17 @@ JSC_CCALL(staef_font_new,

  ret = font2js(js, f);

  // Create surface data object for the font's atlas
  if (f->surface) {
    JSValue surfData = JS_NewObject(js);
    JS_SetPropertyStr(js, surfData, "width", JS_NewInt32(js, f->surface->w));
    JS_SetPropertyStr(js, surfData, "height", JS_NewInt32(js, f->surface->h));
    JS_SetPropertyStr(js, surfData, "format", pixelformat2js(js, f->surface->format));
    JS_SetPropertyStr(js, surfData, "pitch", JS_NewInt32(js, f->surface->pitch));
  // Create texture data object for the font's atlas
  if (f->pixels) {
    JSValue texData = JS_NewObject(js);
    JS_SetPropertyStr(js, texData, "width", JS_NewInt32(js, f->atlas_size));
    JS_SetPropertyStr(js, texData, "height", JS_NewInt32(js, f->atlas_size));
    JS_SetPropertyStr(js, texData, "format", JS_NewString(js, "rgba8"));

    // Lock surface if needed
    int locked = 0;
    if (SDL_MUSTLOCK(f->surface)) {
      if (SDL_LockSurface(f->surface) < 0)
        return JS_ThrowInternalError(js, "Lock surface failed: %s", SDL_GetError());
      locked = 1;
    }

    size_t byte_size = f->surface->pitch * f->surface->h;
    JS_SetPropertyStr(js, surfData, "pixels", js_new_blob_stoned_copy(js, f->surface->pixels, byte_size));

    if (locked)
      SDL_UnlockSurface(f->surface);
    size_t byte_size = f->atlas_size * f->atlas_size * 4;
    JS_SetPropertyStr(js, texData, "pixels", js_new_blob_stoned_copy(js, f->pixels, byte_size));

    JS_SetPropertyStr(js, ret, "surface", surfData);
    JS_SetPropertyStr(js, ret, "texture", texData);
  }
)
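For orientation, a sketch of what the script side receives from staef_font_new after this change; the 'staef' module name and font_new call are assumptions for illustration, while the texture fields match the C code above.

var staef = use('staef')
var f = staef.font_new(ttf_blob, 32)   // ttf_blob: a blob of TTF data (placeholder)

var atlas = f.texture
// atlas.width == atlas.height == the packed atlas size
// atlas.format is "rgba8"; atlas.pixels is a stoned blob of width*height*4 bytes,
// white RGB with glyph coverage in the alpha channel, ready to upload as a texture.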
719
stb_dxt.h
@@ -1,719 +0,0 @@
|
||||
// stb_dxt.h - v1.12 - DXT1/DXT5 compressor - public domain
|
||||
// original by fabian "ryg" giesen - ported to C by stb
|
||||
// use '#define STB_DXT_IMPLEMENTATION' before including to create the implementation
|
||||
//
|
||||
// USAGE:
|
||||
// call stb_compress_dxt_block() for every block (you must pad)
|
||||
// source should be a 4x4 block of RGBA data in row-major order;
|
||||
// Alpha channel is not stored if you specify alpha=0 (but you
|
||||
// must supply some constant alpha in the alpha channel).
|
||||
// You can turn on dithering and "high quality" using mode.
|
||||
//
|
||||
// version history:
|
||||
// v1.12 - (ryg) fix bug in single-color table generator
|
||||
// v1.11 - (ryg) avoid racy global init, better single-color tables, remove dither
|
||||
// v1.10 - (i.c) various small quality improvements
|
||||
// v1.09 - (stb) update documentation re: surprising alpha channel requirement
|
||||
// v1.08 - (stb) fix bug in dxt-with-alpha block
|
||||
// v1.07 - (stb) bc4; allow not using libc; add STB_DXT_STATIC
|
||||
// v1.06 - (stb) fix to known-broken 1.05
|
||||
// v1.05 - (stb) support bc5/3dc (Arvids Kokins), use extern "C" in C++ (Pavel Krajcevski)
|
||||
// v1.04 - (ryg) default to no rounding bias for lerped colors (as per S3TC/DX10 spec);
|
||||
// single color match fix (allow for inexact color interpolation);
|
||||
// optimal DXT5 index finder; "high quality" mode that runs multiple refinement steps.
|
||||
// v1.03 - (stb) endianness support
|
||||
// v1.02 - (stb) fix alpha encoding bug
|
||||
// v1.01 - (stb) fix bug converting to RGB that messed up quality, thanks ryg & cbloom
|
||||
// v1.00 - (stb) first release
|
||||
//
|
||||
// contributors:
|
||||
// Rich Geldreich (more accurate index selection)
|
||||
// Kevin Schmidt (#defines for "freestanding" compilation)
|
||||
// github:ppiastucki (BC4 support)
|
||||
// Ignacio Castano - improve DXT endpoint quantization
|
||||
// Alan Hickman - static table initialization
|
||||
//
|
||||
// LICENSE
|
||||
//
|
||||
// See end of file for license information.
|
||||
|
||||
#ifndef STB_INCLUDE_STB_DXT_H
|
||||
#define STB_INCLUDE_STB_DXT_H
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
#ifdef STB_DXT_STATIC
|
||||
#define STBDDEF static
|
||||
#else
|
||||
#define STBDDEF extern
|
||||
#endif
|
||||
|
||||
// compression mode (bitflags)
|
||||
#define STB_DXT_NORMAL 0
|
||||
#define STB_DXT_DITHER 1 // use dithering. was always dubious, now deprecated. does nothing!
|
||||
#define STB_DXT_HIGHQUAL 2 // high quality mode, does two refinement steps instead of 1. ~30-40% slower.
|
||||
|
||||
STBDDEF void stb_compress_dxt_block(unsigned char *dest, const unsigned char *src_rgba_four_bytes_per_pixel, int alpha, int mode);
|
||||
STBDDEF void stb_compress_bc4_block(unsigned char *dest, const unsigned char *src_r_one_byte_per_pixel);
|
||||
STBDDEF void stb_compress_bc5_block(unsigned char *dest, const unsigned char *src_rg_two_byte_per_pixel);
|
||||
|
||||
#define STB_COMPRESS_DXT_BLOCK
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
#endif // STB_INCLUDE_STB_DXT_H
|
||||
|
||||
#ifdef STB_DXT_IMPLEMENTATION
|
||||
|
||||
// configuration options for DXT encoder. set them in the project/makefile or just define
|
||||
// them at the top.
|
||||
|
||||
// STB_DXT_USE_ROUNDING_BIAS
|
||||
// use a rounding bias during color interpolation. this is closer to what "ideal"
|
||||
// interpolation would do but doesn't match the S3TC/DX10 spec. old versions (pre-1.03)
|
||||
// implicitly had this turned on.
|
||||
//
|
||||
// in case you're targeting a specific type of hardware (e.g. console programmers):
|
||||
// NVidia and Intel GPUs (as of 2010) as well as DX9 ref use DXT decoders that are closer
|
||||
// to STB_DXT_USE_ROUNDING_BIAS. AMD/ATI, S3 and DX10 ref are closer to rounding with no bias.
|
||||
// you also see "(a*5 + b*3) / 8" on some old GPU designs.
|
||||
// #define STB_DXT_USE_ROUNDING_BIAS
|
||||
|
||||
#include <stdlib.h>
|
||||
|
||||
#if !defined(STBD_FABS)
|
||||
#include <math.h>
|
||||
#endif
|
||||
|
||||
#ifndef STBD_FABS
|
||||
#define STBD_FABS(x) fabs(x)
|
||||
#endif
|
||||
|
||||
static const unsigned char stb__OMatch5[256][2] = {
|
||||
{ 0, 0 }, { 0, 0 }, { 0, 1 }, { 0, 1 }, { 1, 0 }, { 1, 0 }, { 1, 0 }, { 1, 1 },
|
||||
{ 1, 1 }, { 1, 1 }, { 1, 2 }, { 0, 4 }, { 2, 1 }, { 2, 1 }, { 2, 1 }, { 2, 2 },
|
||||
{ 2, 2 }, { 2, 2 }, { 2, 3 }, { 1, 5 }, { 3, 2 }, { 3, 2 }, { 4, 0 }, { 3, 3 },
|
||||
{ 3, 3 }, { 3, 3 }, { 3, 4 }, { 3, 4 }, { 3, 4 }, { 3, 5 }, { 4, 3 }, { 4, 3 },
|
||||
{ 5, 2 }, { 4, 4 }, { 4, 4 }, { 4, 5 }, { 4, 5 }, { 5, 4 }, { 5, 4 }, { 5, 4 },
|
||||
{ 6, 3 }, { 5, 5 }, { 5, 5 }, { 5, 6 }, { 4, 8 }, { 6, 5 }, { 6, 5 }, { 6, 5 },
|
||||
{ 6, 6 }, { 6, 6 }, { 6, 6 }, { 6, 7 }, { 5, 9 }, { 7, 6 }, { 7, 6 }, { 8, 4 },
|
||||
{ 7, 7 }, { 7, 7 }, { 7, 7 }, { 7, 8 }, { 7, 8 }, { 7, 8 }, { 7, 9 }, { 8, 7 },
|
||||
{ 8, 7 }, { 9, 6 }, { 8, 8 }, { 8, 8 }, { 8, 9 }, { 8, 9 }, { 9, 8 }, { 9, 8 },
|
||||
{ 9, 8 }, { 10, 7 }, { 9, 9 }, { 9, 9 }, { 9, 10 }, { 8, 12 }, { 10, 9 }, { 10, 9 },
|
||||
{ 10, 9 }, { 10, 10 }, { 10, 10 }, { 10, 10 }, { 10, 11 }, { 9, 13 }, { 11, 10 }, { 11, 10 },
|
||||
{ 12, 8 }, { 11, 11 }, { 11, 11 }, { 11, 11 }, { 11, 12 }, { 11, 12 }, { 11, 12 }, { 11, 13 },
|
||||
{ 12, 11 }, { 12, 11 }, { 13, 10 }, { 12, 12 }, { 12, 12 }, { 12, 13 }, { 12, 13 }, { 13, 12 },
|
||||
{ 13, 12 }, { 13, 12 }, { 14, 11 }, { 13, 13 }, { 13, 13 }, { 13, 14 }, { 12, 16 }, { 14, 13 },
|
||||
{ 14, 13 }, { 14, 13 }, { 14, 14 }, { 14, 14 }, { 14, 14 }, { 14, 15 }, { 13, 17 }, { 15, 14 },
|
||||
{ 15, 14 }, { 16, 12 }, { 15, 15 }, { 15, 15 }, { 15, 15 }, { 15, 16 }, { 15, 16 }, { 15, 16 },
|
||||
{ 15, 17 }, { 16, 15 }, { 16, 15 }, { 17, 14 }, { 16, 16 }, { 16, 16 }, { 16, 17 }, { 16, 17 },
|
||||
{ 17, 16 }, { 17, 16 }, { 17, 16 }, { 18, 15 }, { 17, 17 }, { 17, 17 }, { 17, 18 }, { 16, 20 },
|
||||
{ 18, 17 }, { 18, 17 }, { 18, 17 }, { 18, 18 }, { 18, 18 }, { 18, 18 }, { 18, 19 }, { 17, 21 },
|
||||
{ 19, 18 }, { 19, 18 }, { 20, 16 }, { 19, 19 }, { 19, 19 }, { 19, 19 }, { 19, 20 }, { 19, 20 },
|
||||
{ 19, 20 }, { 19, 21 }, { 20, 19 }, { 20, 19 }, { 21, 18 }, { 20, 20 }, { 20, 20 }, { 20, 21 },
|
||||
{ 20, 21 }, { 21, 20 }, { 21, 20 }, { 21, 20 }, { 22, 19 }, { 21, 21 }, { 21, 21 }, { 21, 22 },
|
||||
{ 20, 24 }, { 22, 21 }, { 22, 21 }, { 22, 21 }, { 22, 22 }, { 22, 22 }, { 22, 22 }, { 22, 23 },
|
||||
{ 21, 25 }, { 23, 22 }, { 23, 22 }, { 24, 20 }, { 23, 23 }, { 23, 23 }, { 23, 23 }, { 23, 24 },
|
||||
{ 23, 24 }, { 23, 24 }, { 23, 25 }, { 24, 23 }, { 24, 23 }, { 25, 22 }, { 24, 24 }, { 24, 24 },
|
||||
{ 24, 25 }, { 24, 25 }, { 25, 24 }, { 25, 24 }, { 25, 24 }, { 26, 23 }, { 25, 25 }, { 25, 25 },
|
||||
{ 25, 26 }, { 24, 28 }, { 26, 25 }, { 26, 25 }, { 26, 25 }, { 26, 26 }, { 26, 26 }, { 26, 26 },
|
||||
{ 26, 27 }, { 25, 29 }, { 27, 26 }, { 27, 26 }, { 28, 24 }, { 27, 27 }, { 27, 27 }, { 27, 27 },
|
||||
{ 27, 28 }, { 27, 28 }, { 27, 28 }, { 27, 29 }, { 28, 27 }, { 28, 27 }, { 29, 26 }, { 28, 28 },
|
||||
{ 28, 28 }, { 28, 29 }, { 28, 29 }, { 29, 28 }, { 29, 28 }, { 29, 28 }, { 30, 27 }, { 29, 29 },
|
||||
{ 29, 29 }, { 29, 30 }, { 29, 30 }, { 30, 29 }, { 30, 29 }, { 30, 29 }, { 30, 30 }, { 30, 30 },
|
||||
{ 30, 30 }, { 30, 31 }, { 30, 31 }, { 31, 30 }, { 31, 30 }, { 31, 30 }, { 31, 31 }, { 31, 31 },
|
||||
};
|
||||
static const unsigned char stb__OMatch6[256][2] = {
|
||||
{ 0, 0 }, { 0, 1 }, { 1, 0 }, { 1, 1 }, { 1, 1 }, { 1, 2 }, { 2, 1 }, { 2, 2 },
|
||||
{ 2, 2 }, { 2, 3 }, { 3, 2 }, { 3, 3 }, { 3, 3 }, { 3, 4 }, { 4, 3 }, { 4, 4 },
|
||||
{ 4, 4 }, { 4, 5 }, { 5, 4 }, { 5, 5 }, { 5, 5 }, { 5, 6 }, { 6, 5 }, { 6, 6 },
|
||||
{ 6, 6 }, { 6, 7 }, { 7, 6 }, { 7, 7 }, { 7, 7 }, { 7, 8 }, { 8, 7 }, { 8, 8 },
|
||||
{ 8, 8 }, { 8, 9 }, { 9, 8 }, { 9, 9 }, { 9, 9 }, { 9, 10 }, { 10, 9 }, { 10, 10 },
|
||||
{ 10, 10 }, { 10, 11 }, { 11, 10 }, { 8, 16 }, { 11, 11 }, { 11, 12 }, { 12, 11 }, { 9, 17 },
|
||||
{ 12, 12 }, { 12, 13 }, { 13, 12 }, { 11, 16 }, { 13, 13 }, { 13, 14 }, { 14, 13 }, { 12, 17 },
|
||||
{ 14, 14 }, { 14, 15 }, { 15, 14 }, { 14, 16 }, { 15, 15 }, { 15, 16 }, { 16, 14 }, { 16, 15 },
|
||||
{ 17, 14 }, { 16, 16 }, { 16, 17 }, { 17, 16 }, { 18, 15 }, { 17, 17 }, { 17, 18 }, { 18, 17 },
|
||||
{ 20, 14 }, { 18, 18 }, { 18, 19 }, { 19, 18 }, { 21, 15 }, { 19, 19 }, { 19, 20 }, { 20, 19 },
|
||||
{ 20, 20 }, { 20, 20 }, { 20, 21 }, { 21, 20 }, { 21, 21 }, { 21, 21 }, { 21, 22 }, { 22, 21 },
|
||||
{ 22, 22 }, { 22, 22 }, { 22, 23 }, { 23, 22 }, { 23, 23 }, { 23, 23 }, { 23, 24 }, { 24, 23 },
|
||||
{ 24, 24 }, { 24, 24 }, { 24, 25 }, { 25, 24 }, { 25, 25 }, { 25, 25 }, { 25, 26 }, { 26, 25 },
|
||||
{ 26, 26 }, { 26, 26 }, { 26, 27 }, { 27, 26 }, { 24, 32 }, { 27, 27 }, { 27, 28 }, { 28, 27 },
|
||||
{ 25, 33 }, { 28, 28 }, { 28, 29 }, { 29, 28 }, { 27, 32 }, { 29, 29 }, { 29, 30 }, { 30, 29 },
|
||||
{ 28, 33 }, { 30, 30 }, { 30, 31 }, { 31, 30 }, { 30, 32 }, { 31, 31 }, { 31, 32 }, { 32, 30 },
|
||||
{ 32, 31 }, { 33, 30 }, { 32, 32 }, { 32, 33 }, { 33, 32 }, { 34, 31 }, { 33, 33 }, { 33, 34 },
|
||||
{ 34, 33 }, { 36, 30 }, { 34, 34 }, { 34, 35 }, { 35, 34 }, { 37, 31 }, { 35, 35 }, { 35, 36 },
|
||||
{ 36, 35 }, { 36, 36 }, { 36, 36 }, { 36, 37 }, { 37, 36 }, { 37, 37 }, { 37, 37 }, { 37, 38 },
|
||||
{ 38, 37 }, { 38, 38 }, { 38, 38 }, { 38, 39 }, { 39, 38 }, { 39, 39 }, { 39, 39 }, { 39, 40 },
|
||||
{ 40, 39 }, { 40, 40 }, { 40, 40 }, { 40, 41 }, { 41, 40 }, { 41, 41 }, { 41, 41 }, { 41, 42 },
|
||||
{ 42, 41 }, { 42, 42 }, { 42, 42 }, { 42, 43 }, { 43, 42 }, { 40, 48 }, { 43, 43 }, { 43, 44 },
|
||||
{ 44, 43 }, { 41, 49 }, { 44, 44 }, { 44, 45 }, { 45, 44 }, { 43, 48 }, { 45, 45 }, { 45, 46 },
|
||||
{ 46, 45 }, { 44, 49 }, { 46, 46 }, { 46, 47 }, { 47, 46 }, { 46, 48 }, { 47, 47 }, { 47, 48 },
|
||||
{ 48, 46 }, { 48, 47 }, { 49, 46 }, { 48, 48 }, { 48, 49 }, { 49, 48 }, { 50, 47 }, { 49, 49 },
|
||||
{ 49, 50 }, { 50, 49 }, { 52, 46 }, { 50, 50 }, { 50, 51 }, { 51, 50 }, { 53, 47 }, { 51, 51 },
|
||||
{ 51, 52 }, { 52, 51 }, { 52, 52 }, { 52, 52 }, { 52, 53 }, { 53, 52 }, { 53, 53 }, { 53, 53 },
|
||||
{ 53, 54 }, { 54, 53 }, { 54, 54 }, { 54, 54 }, { 54, 55 }, { 55, 54 }, { 55, 55 }, { 55, 55 },
|
||||
{ 55, 56 }, { 56, 55 }, { 56, 56 }, { 56, 56 }, { 56, 57 }, { 57, 56 }, { 57, 57 }, { 57, 57 },
|
||||
{ 57, 58 }, { 58, 57 }, { 58, 58 }, { 58, 58 }, { 58, 59 }, { 59, 58 }, { 59, 59 }, { 59, 59 },
|
||||
{ 59, 60 }, { 60, 59 }, { 60, 60 }, { 60, 60 }, { 60, 61 }, { 61, 60 }, { 61, 61 }, { 61, 61 },
|
||||
{ 61, 62 }, { 62, 61 }, { 62, 62 }, { 62, 62 }, { 62, 63 }, { 63, 62 }, { 63, 63 }, { 63, 63 },
|
||||
};
|
||||
|
||||
static int stb__Mul8Bit(int a, int b)
|
||||
{
|
||||
int t = a*b + 128;
|
||||
return (t + (t >> 8)) >> 8;
|
||||
}
|
||||
|
||||
static void stb__From16Bit(unsigned char *out, unsigned short v)
|
||||
{
|
||||
int rv = (v & 0xf800) >> 11;
|
||||
int gv = (v & 0x07e0) >> 5;
|
||||
int bv = (v & 0x001f) >> 0;
|
||||
|
||||
// expand to 8 bits via bit replication
|
||||
out[0] = (rv * 33) >> 2;
|
||||
out[1] = (gv * 65) >> 4;
|
||||
out[2] = (bv * 33) >> 2;
|
||||
out[3] = 0;
|
||||
}
|
||||
|
||||
static unsigned short stb__As16Bit(int r, int g, int b)
|
||||
{
|
||||
return (unsigned short)((stb__Mul8Bit(r,31) << 11) + (stb__Mul8Bit(g,63) << 5) + stb__Mul8Bit(b,31));
|
||||
}
|
||||
|
||||
// linear interpolation at 1/3 point between a and b, using desired rounding type
|
||||
static int stb__Lerp13(int a, int b)
|
||||
{
|
||||
#ifdef STB_DXT_USE_ROUNDING_BIAS
|
||||
// with rounding bias
|
||||
return a + stb__Mul8Bit(b-a, 0x55);
|
||||
#else
|
||||
// without rounding bias
|
||||
// replace "/ 3" by "* 0xaaab) >> 17" if your compiler sucks or you really need every ounce of speed.
|
||||
return (2*a + b) / 3;
|
||||
#endif
|
||||
}
|
||||
|
||||
// lerp RGB color
|
||||
static void stb__Lerp13RGB(unsigned char *out, unsigned char *p1, unsigned char *p2)
|
||||
{
|
||||
out[0] = (unsigned char)stb__Lerp13(p1[0], p2[0]);
|
||||
out[1] = (unsigned char)stb__Lerp13(p1[1], p2[1]);
|
||||
out[2] = (unsigned char)stb__Lerp13(p1[2], p2[2]);
|
||||
}
|
||||
|
||||
/****************************************************************************/
|
||||
|
||||
static void stb__EvalColors(unsigned char *color,unsigned short c0,unsigned short c1)
|
||||
{
|
||||
stb__From16Bit(color+ 0, c0);
|
||||
stb__From16Bit(color+ 4, c1);
|
||||
stb__Lerp13RGB(color+ 8, color+0, color+4);
|
||||
stb__Lerp13RGB(color+12, color+4, color+0);
|
||||
}
|
||||
|
||||
// The color matching function
|
||||
static unsigned int stb__MatchColorsBlock(unsigned char *block, unsigned char *color)
|
||||
{
|
||||
unsigned int mask = 0;
|
||||
int dirr = color[0*4+0] - color[1*4+0];
|
||||
int dirg = color[0*4+1] - color[1*4+1];
|
||||
int dirb = color[0*4+2] - color[1*4+2];
|
||||
int dots[16];
|
||||
int stops[4];
|
||||
int i;
|
||||
int c0Point, halfPoint, c3Point;
|
||||
|
||||
for(i=0;i<16;i++)
|
||||
dots[i] = block[i*4+0]*dirr + block[i*4+1]*dirg + block[i*4+2]*dirb;
|
||||
|
||||
for(i=0;i<4;i++)
|
||||
stops[i] = color[i*4+0]*dirr + color[i*4+1]*dirg + color[i*4+2]*dirb;
|
||||
|
||||
// think of the colors as arranged on a line; project point onto that line, then choose
|
||||
// next color out of available ones. we compute the crossover points for "best color in top
|
||||
// half"/"best in bottom half" and then the same inside that subinterval.
|
||||
//
|
||||
// relying on this 1d approximation isn't always optimal in terms of euclidean distance,
|
||||
// but it's very close and a lot faster.
|
||||
// http://cbloomrants.blogspot.com/2008/12/12-08-08-dxtc-summary.html
|
||||
|
||||
c0Point = (stops[1] + stops[3]);
|
||||
halfPoint = (stops[3] + stops[2]);
|
||||
c3Point = (stops[2] + stops[0]);
|
||||
|
||||
for (i=15;i>=0;i--) {
|
||||
int dot = dots[i]*2;
|
||||
mask <<= 2;
|
||||
|
||||
if(dot < halfPoint)
|
||||
mask |= (dot < c0Point) ? 1 : 3;
|
||||
else
|
||||
mask |= (dot < c3Point) ? 2 : 0;
|
||||
}
|
||||
|
||||
return mask;
|
||||
}
|
||||
|
||||
// The color optimization function. (Clever code, part 1)
|
||||
static void stb__OptimizeColorsBlock(unsigned char *block, unsigned short *pmax16, unsigned short *pmin16)
|
||||
{
|
||||
int mind,maxd;
|
||||
unsigned char *minp, *maxp;
|
||||
double magn;
|
||||
int v_r,v_g,v_b;
|
||||
static const int nIterPower = 4;
|
||||
float covf[6],vfr,vfg,vfb;
|
||||
|
||||
// determine color distribution
|
||||
int cov[6];
|
||||
int mu[3],min[3],max[3];
|
||||
int ch,i,iter;
|
||||
|
||||
for(ch=0;ch<3;ch++)
|
||||
{
|
||||
const unsigned char *bp = ((const unsigned char *) block) + ch;
|
||||
int muv,minv,maxv;
|
||||
|
||||
muv = minv = maxv = bp[0];
|
||||
for(i=4;i<64;i+=4)
|
||||
{
|
||||
muv += bp[i];
|
||||
if (bp[i] < minv) minv = bp[i];
|
||||
else if (bp[i] > maxv) maxv = bp[i];
|
||||
}
|
||||
|
||||
mu[ch] = (muv + 8) >> 4;
|
||||
min[ch] = minv;
|
||||
max[ch] = maxv;
|
||||
}
|
||||
|
||||
// determine covariance matrix
|
||||
for (i=0;i<6;i++)
|
||||
cov[i] = 0;
|
||||
|
||||
for (i=0;i<16;i++)
|
||||
{
|
||||
int r = block[i*4+0] - mu[0];
|
||||
int g = block[i*4+1] - mu[1];
|
||||
int b = block[i*4+2] - mu[2];
|
||||
|
||||
cov[0] += r*r;
|
||||
cov[1] += r*g;
|
||||
cov[2] += r*b;
|
||||
cov[3] += g*g;
|
||||
cov[4] += g*b;
|
||||
cov[5] += b*b;
|
||||
}
|
||||
|
||||
// convert covariance matrix to float, find principal axis via power iter
|
||||
for(i=0;i<6;i++)
|
||||
covf[i] = cov[i] / 255.0f;
|
||||
|
||||
vfr = (float) (max[0] - min[0]);
|
||||
vfg = (float) (max[1] - min[1]);
|
||||
vfb = (float) (max[2] - min[2]);
|
||||
|
||||
for(iter=0;iter<nIterPower;iter++)
|
||||
{
|
||||
float r = vfr*covf[0] + vfg*covf[1] + vfb*covf[2];
|
||||
float g = vfr*covf[1] + vfg*covf[3] + vfb*covf[4];
|
||||
float b = vfr*covf[2] + vfg*covf[4] + vfb*covf[5];
|
||||
|
||||
vfr = r;
|
||||
vfg = g;
|
||||
vfb = b;
|
||||
}
|
||||
|
||||
magn = STBD_FABS(vfr);
|
||||
if (STBD_FABS(vfg) > magn) magn = STBD_FABS(vfg);
|
||||
if (STBD_FABS(vfb) > magn) magn = STBD_FABS(vfb);
|
||||
|
||||
if(magn < 4.0f) { // too small, default to luminance
|
||||
v_r = 299; // JPEG YCbCr luma coefs, scaled by 1000.
|
||||
v_g = 587;
|
||||
v_b = 114;
|
||||
} else {
|
||||
magn = 512.0 / magn;
|
||||
v_r = (int) (vfr * magn);
|
||||
v_g = (int) (vfg * magn);
|
||||
v_b = (int) (vfb * magn);
|
||||
}
|
||||
|
||||
minp = maxp = block;
|
||||
mind = maxd = block[0]*v_r + block[1]*v_g + block[2]*v_b;
|
||||
// Pick colors at extreme points
|
||||
for(i=1;i<16;i++)
|
||||
{
|
||||
int dot = block[i*4+0]*v_r + block[i*4+1]*v_g + block[i*4+2]*v_b;
|
||||
|
||||
if (dot < mind) {
|
||||
mind = dot;
|
||||
minp = block+i*4;
|
||||
}
|
||||
|
||||
if (dot > maxd) {
|
||||
maxd = dot;
|
||||
maxp = block+i*4;
|
||||
}
|
||||
}
|
||||
|
||||
*pmax16 = stb__As16Bit(maxp[0],maxp[1],maxp[2]);
|
||||
*pmin16 = stb__As16Bit(minp[0],minp[1],minp[2]);
|
||||
}
|
||||
|
||||
static const float stb__midpoints5[32] = {
|
||||
0.015686f, 0.047059f, 0.078431f, 0.111765f, 0.145098f, 0.176471f, 0.207843f, 0.241176f, 0.274510f, 0.305882f, 0.337255f, 0.370588f, 0.403922f, 0.435294f, 0.466667f, 0.5f,
|
||||
0.533333f, 0.564706f, 0.596078f, 0.629412f, 0.662745f, 0.694118f, 0.725490f, 0.758824f, 0.792157f, 0.823529f, 0.854902f, 0.888235f, 0.921569f, 0.952941f, 0.984314f, 1.0f
|
||||
};
|
||||
|
||||
static const float stb__midpoints6[64] = {
|
||||
0.007843f, 0.023529f, 0.039216f, 0.054902f, 0.070588f, 0.086275f, 0.101961f, 0.117647f, 0.133333f, 0.149020f, 0.164706f, 0.180392f, 0.196078f, 0.211765f, 0.227451f, 0.245098f,
|
||||
0.262745f, 0.278431f, 0.294118f, 0.309804f, 0.325490f, 0.341176f, 0.356863f, 0.372549f, 0.388235f, 0.403922f, 0.419608f, 0.435294f, 0.450980f, 0.466667f, 0.482353f, 0.500000f,
|
||||
0.517647f, 0.533333f, 0.549020f, 0.564706f, 0.580392f, 0.596078f, 0.611765f, 0.627451f, 0.643137f, 0.658824f, 0.674510f, 0.690196f, 0.705882f, 0.721569f, 0.737255f, 0.754902f,
|
||||
0.772549f, 0.788235f, 0.803922f, 0.819608f, 0.835294f, 0.850980f, 0.866667f, 0.882353f, 0.898039f, 0.913725f, 0.929412f, 0.945098f, 0.960784f, 0.976471f, 0.992157f, 1.0f
|
||||
};
|
||||
|
||||
static unsigned short stb__Quantize5(float x)
|
||||
{
|
||||
unsigned short q;
|
||||
x = x < 0 ? 0 : x > 1 ? 1 : x; // saturate
|
||||
q = (unsigned short)(x * 31);
|
||||
q += (x > stb__midpoints5[q]);
|
||||
return q;
|
||||
}
|
||||
|
||||
static unsigned short stb__Quantize6(float x)
|
||||
{
|
||||
unsigned short q;
|
||||
x = x < 0 ? 0 : x > 1 ? 1 : x; // saturate
|
||||
q = (unsigned short)(x * 63);
|
||||
q += (x > stb__midpoints6[q]);
|
||||
return q;
|
||||
}
|
||||
|
||||
// The refinement function. (Clever code, part 2)
|
||||
// Tries to optimize colors to suit block contents better.
|
||||
// (By solving a least squares system via normal equations+Cramer's rule)
|
||||
static int stb__RefineBlock(unsigned char *block, unsigned short *pmax16, unsigned short *pmin16, unsigned int mask)
|
||||
{
|
||||
static const int w1Tab[4] = { 3,0,2,1 };
|
||||
static const int prods[4] = { 0x090000,0x000900,0x040102,0x010402 };
|
||||
// ^some magic to save a lot of multiplies in the accumulating loop...
|
||||
// (precomputed products of weights for least squares system, accumulated inside one 32-bit register)
|
||||
|
||||
float f;
|
||||
unsigned short oldMin, oldMax, min16, max16;
|
||||
int i, akku = 0, xx,xy,yy;
|
||||
int At1_r,At1_g,At1_b;
|
||||
int At2_r,At2_g,At2_b;
|
||||
unsigned int cm = mask;
|
||||
|
||||
oldMin = *pmin16;
|
||||
oldMax = *pmax16;
|
||||
|
||||
if((mask ^ (mask<<2)) < 4) // all pixels have the same index?
|
||||
{
|
||||
// yes, linear system would be singular; solve using optimal
|
||||
// single-color match on average color
|
||||
int r = 8, g = 8, b = 8;
|
||||
for (i=0;i<16;++i) {
|
||||
r += block[i*4+0];
|
||||
g += block[i*4+1];
|
||||
b += block[i*4+2];
|
||||
}
|
||||
|
||||
r >>= 4; g >>= 4; b >>= 4;
|
||||
|
||||
max16 = (stb__OMatch5[r][0]<<11) | (stb__OMatch6[g][0]<<5) | stb__OMatch5[b][0];
|
||||
min16 = (stb__OMatch5[r][1]<<11) | (stb__OMatch6[g][1]<<5) | stb__OMatch5[b][1];
|
||||
} else {
|
||||
At1_r = At1_g = At1_b = 0;
|
||||
At2_r = At2_g = At2_b = 0;
|
||||
for (i=0;i<16;++i,cm>>=2) {
|
||||
int step = cm&3;
|
||||
int w1 = w1Tab[step];
|
||||
int r = block[i*4+0];
|
||||
int g = block[i*4+1];
|
||||
int b = block[i*4+2];
|
||||
|
||||
akku += prods[step];
|
||||
At1_r += w1*r;
|
||||
At1_g += w1*g;
|
||||
At1_b += w1*b;
|
||||
At2_r += r;
|
||||
At2_g += g;
|
||||
At2_b += b;
|
||||
}
|
||||
|
||||
At2_r = 3*At2_r - At1_r;
|
||||
At2_g = 3*At2_g - At1_g;
|
||||
At2_b = 3*At2_b - At1_b;
|
||||
|
||||
// extract solutions and decide solvability
|
||||
xx = akku >> 16;
|
||||
yy = (akku >> 8) & 0xff;
|
||||
xy = (akku >> 0) & 0xff;
|
||||
|
||||
f = 3.0f / 255.0f / (xx*yy - xy*xy);
|
||||
|
||||
max16 = stb__Quantize5((At1_r*yy - At2_r * xy) * f) << 11;
|
||||
max16 |= stb__Quantize6((At1_g*yy - At2_g * xy) * f) << 5;
|
||||
max16 |= stb__Quantize5((At1_b*yy - At2_b * xy) * f) << 0;
|
||||
|
||||
min16 = stb__Quantize5((At2_r*xx - At1_r * xy) * f) << 11;
|
||||
min16 |= stb__Quantize6((At2_g*xx - At1_g * xy) * f) << 5;
|
||||
min16 |= stb__Quantize5((At2_b*xx - At1_b * xy) * f) << 0;
|
||||
}
|
||||
|
||||
*pmin16 = min16;
|
||||
*pmax16 = max16;
|
||||
return oldMin != min16 || oldMax != max16;
|
||||
}
|
||||
|
||||
// Color block compression
|
||||
static void stb__CompressColorBlock(unsigned char *dest, unsigned char *block, int mode)
|
||||
{
|
||||
unsigned int mask;
|
||||
int i;
|
||||
int refinecount;
|
||||
unsigned short max16, min16;
|
||||
unsigned char color[4*4];
|
||||
|
||||
refinecount = (mode & STB_DXT_HIGHQUAL) ? 2 : 1;
|
||||
|
||||
// check if block is constant
|
||||
for (i=1;i<16;i++)
|
||||
if (((unsigned int *) block)[i] != ((unsigned int *) block)[0])
|
||||
break;
|
||||
|
||||
if(i == 16) { // constant color
|
||||
int r = block[0], g = block[1], b = block[2];
|
||||
mask = 0xaaaaaaaa;
|
||||
max16 = (stb__OMatch5[r][0]<<11) | (stb__OMatch6[g][0]<<5) | stb__OMatch5[b][0];
|
||||
min16 = (stb__OMatch5[r][1]<<11) | (stb__OMatch6[g][1]<<5) | stb__OMatch5[b][1];
|
||||
} else {
|
||||
// first step: PCA+map along principal axis
|
||||
stb__OptimizeColorsBlock(block,&max16,&min16);
|
||||
if (max16 != min16) {
|
||||
stb__EvalColors(color,max16,min16);
|
||||
mask = stb__MatchColorsBlock(block,color);
|
||||
} else
|
||||
mask = 0;
|
||||
|
||||
// third step: refine (multiple times if requested)
|
||||
for (i=0;i<refinecount;i++) {
|
||||
unsigned int lastmask = mask;
|
||||
|
||||
if (stb__RefineBlock(block,&max16,&min16,mask)) {
|
||||
if (max16 != min16) {
|
||||
stb__EvalColors(color,max16,min16);
|
||||
mask = stb__MatchColorsBlock(block,color);
|
||||
} else {
|
||||
mask = 0;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if(mask == lastmask)
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
// write the color block
|
||||
if(max16 < min16)
|
||||
{
|
||||
unsigned short t = min16;
|
||||
min16 = max16;
|
||||
max16 = t;
|
||||
mask ^= 0x55555555;
|
||||
}
|
||||
|
||||
dest[0] = (unsigned char) (max16);
|
||||
dest[1] = (unsigned char) (max16 >> 8);
|
||||
dest[2] = (unsigned char) (min16);
|
||||
dest[3] = (unsigned char) (min16 >> 8);
|
||||
dest[4] = (unsigned char) (mask);
|
||||
dest[5] = (unsigned char) (mask >> 8);
|
||||
dest[6] = (unsigned char) (mask >> 16);
|
||||
dest[7] = (unsigned char) (mask >> 24);
|
||||
}
|
||||
|
||||
// Alpha block compression (this is easy for a change)
|
||||
static void stb__CompressAlphaBlock(unsigned char *dest,unsigned char *src, int stride)
|
||||
{
|
||||
int i,dist,bias,dist4,dist2,bits,mask;
|
||||
|
||||
// find min/max color
|
||||
int mn,mx;
|
||||
mn = mx = src[0];
|
||||
|
||||
for (i=1;i<16;i++)
|
||||
{
|
||||
if (src[i*stride] < mn) mn = src[i*stride];
|
||||
else if (src[i*stride] > mx) mx = src[i*stride];
|
||||
}
|
||||
|
||||
// encode them
|
||||
dest[0] = (unsigned char)mx;
|
||||
dest[1] = (unsigned char)mn;
|
||||
dest += 2;
|
||||
|
||||
// determine bias and emit color indices
|
||||
// given the choice of mx/mn, these indices are optimal:
|
||||
// http://fgiesen.wordpress.com/2009/12/15/dxt5-alpha-block-index-determination/
|
||||
dist = mx-mn;
|
||||
dist4 = dist*4;
|
||||
dist2 = dist*2;
|
||||
bias = (dist < 8) ? (dist - 1) : (dist/2 + 2);
|
||||
bias -= mn * 7;
|
||||
bits = 0,mask=0;
|
||||
|
||||
for (i=0;i<16;i++) {
|
||||
int a = src[i*stride]*7 + bias;
|
||||
int ind,t;
|
||||
|
||||
// select index. this is a "linear scale" lerp factor between 0 (val=min) and 7 (val=max).
|
||||
t = (a >= dist4) ? -1 : 0; ind = t & 4; a -= dist4 & t;
|
||||
t = (a >= dist2) ? -1 : 0; ind += t & 2; a -= dist2 & t;
|
||||
ind += (a >= dist);
|
||||
|
||||
// turn linear scale into DXT index (0/1 are extremal pts)
|
||||
ind = -ind & 7;
|
||||
ind ^= (2 > ind);
|
||||
|
||||
// write index
|
||||
mask |= ind << bits;
|
||||
if((bits += 3) >= 8) {
|
||||
*dest++ = (unsigned char)mask;
|
||||
mask >>= 8;
|
||||
bits -= 8;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void stb_compress_dxt_block(unsigned char *dest, const unsigned char *src, int alpha, int mode)
|
||||
{
|
||||
unsigned char data[16][4];
|
||||
if (alpha) {
|
||||
int i;
|
||||
stb__CompressAlphaBlock(dest,(unsigned char*) src+3, 4);
|
||||
dest += 8;
|
||||
// make a new copy of the data in which alpha is opaque,
|
||||
// because code uses a fast test for color constancy
|
||||
memcpy(data, src, 4*16);
|
||||
for (i=0; i < 16; ++i)
|
||||
data[i][3] = 255;
|
||||
src = &data[0][0];
|
||||
}
|
||||
|
||||
stb__CompressColorBlock(dest,(unsigned char*) src,mode);
|
||||
}
|
||||
|
||||
void stb_compress_bc4_block(unsigned char *dest, const unsigned char *src)
|
||||
{
|
||||
stb__CompressAlphaBlock(dest,(unsigned char*) src, 1);
|
||||
}
|
||||
|
||||
void stb_compress_bc5_block(unsigned char *dest, const unsigned char *src)
|
||||
{
|
||||
stb__CompressAlphaBlock(dest,(unsigned char*) src,2);
|
||||
stb__CompressAlphaBlock(dest + 8,(unsigned char*) src+1,2);
|
||||
}
|
||||
#endif // STB_DXT_IMPLEMENTATION
|
||||
|
||||
// Compile with STB_DXT_IMPLEMENTATION and STB_DXT_GENERATE_TABLES
|
||||
// defined to generate the tables above.
|
||||
#ifdef STB_DXT_GENERATE_TABLES
|
||||
#include <stdio.h>
|
||||
|
||||
int main()
|
||||
{
|
||||
int i, j;
|
||||
const char *omatch_names[] = { "stb__OMatch5", "stb__OMatch6" };
|
||||
int dequant_mults[2] = { 33*4, 65 }; // .4 fixed-point dequant multipliers
|
||||
|
||||
// optimal endpoint tables
|
||||
for (i = 0; i < 2; ++i) {
|
||||
int dequant = dequant_mults[i];
|
||||
int size = i ? 64 : 32;
|
||||
printf("static const unsigned char %s[256][2] = {\n", omatch_names[i]);
|
||||
for (int j = 0; j < 256; ++j) {
|
||||
int mn, mx;
|
||||
int best_mn = 0, best_mx = 0;
|
||||
int best_err = 256 * 100;
|
||||
for (mn=0;mn<size;mn++) {
|
||||
for (mx=0;mx<size;mx++) {
|
||||
int mine = (mn * dequant) >> 4;
|
||||
int maxe = (mx * dequant) >> 4;
|
||||
int err = abs(stb__Lerp13(maxe, mine) - j) * 100;
|
||||
|
||||
// DX10 spec says that interpolation must be within 3% of "correct" result,
|
||||
// add this as error term. Normally we'd expect a random distribution of
|
||||
// +-1.5% error, but nowhere in the spec does it say that the error has to be
|
||||
// unbiased - better safe than sorry.
|
||||
err += abs(maxe - mine) * 3;
|
||||
|
||||
if(err < best_err) {
|
||||
best_mn = mn;
|
||||
best_mx = mx;
|
||||
best_err = err;
|
||||
}
|
||||
}
|
||||
}
|
||||
if ((j % 8) == 0) printf(" "); // 2 spaces, third is done below
|
||||
printf(" { %2d, %2d },", best_mx, best_mn);
|
||||
if ((j % 8) == 7) printf("\n");
|
||||
}
|
||||
printf("};\n");
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
#endif
|
||||
|
||||
/*
|
||||
------------------------------------------------------------------------------
|
||||
This software is available under 2 licenses -- choose whichever you prefer.
|
||||
------------------------------------------------------------------------------
|
||||
ALTERNATIVE A - MIT License
|
||||
Copyright (c) 2017 Sean Barrett
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy of
|
||||
this software and associated documentation files (the "Software"), to deal in
|
||||
the Software without restriction, including without limitation the rights to
|
||||
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
|
||||
of the Software, and to permit persons to whom the Software is furnished to do
|
||||
so, subject to the following conditions:
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
------------------------------------------------------------------------------
|
||||
ALTERNATIVE B - Public Domain (www.unlicense.org)
|
||||
This is free and unencumbered software released into the public domain.
|
||||
Anyone is free to copy, modify, publish, use, compile, sell, or distribute this
|
||||
software, either in source code form or as a compiled binary, for any purpose,
|
||||
commercial or non-commercial, and by any means.
|
||||
In jurisdictions that recognize copyright laws, the author or authors of this
|
||||
software dedicate any and all copyright interest in the software to the public
|
||||
domain. We make this dedication for the benefit of the public at large and to
|
||||
the detriment of our heirs and successors. We intend this dedication to be an
|
||||
overt act of relinquishment in perpetuity of all present and future rights to
|
||||
this software under copyright law.
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
|
||||
ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
|
||||
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
------------------------------------------------------------------------------
|
||||
*/
|
||||
7988
stb_image.h
File diff suppressed because it is too large
1724
stb_image_write.h
File diff suppressed because it is too large
623
stb_rect_pack.h
@@ -1,623 +0,0 @@
|
||||
// stb_rect_pack.h - v1.01 - public domain - rectangle packing
|
||||
// Sean Barrett 2014
|
||||
//
|
||||
// Useful for e.g. packing rectangular textures into an atlas.
|
||||
// Does not do rotation.
|
||||
//
|
||||
// Before #including,
|
||||
//
|
||||
// #define STB_RECT_PACK_IMPLEMENTATION
|
||||
//
|
||||
// in the file that you want to have the implementation.
|
||||
//
|
||||
// Not necessarily the awesomest packing method, but better than
|
||||
// the totally naive one in stb_truetype (which is primarily what
|
||||
// this is meant to replace).
|
||||
//
|
||||
// Has only had a few tests run, may have issues.
|
||||
//
|
||||
// More docs to come.
|
||||
//
|
||||
// No memory allocations; uses qsort() and assert() from stdlib.
|
||||
// Can override those by defining STBRP_SORT and STBRP_ASSERT.
|
||||
//
|
||||
// This library currently uses the Skyline Bottom-Left algorithm.
|
||||
//
|
||||
// Please note: better rectangle packers are welcome! Please
|
||||
// implement them to the same API, but with a different init
|
||||
// function.
|
||||
//
|
||||
// Credits
|
||||
//
|
||||
// Library
|
||||
// Sean Barrett
|
||||
// Minor features
|
||||
// Martins Mozeiko
|
||||
// github:IntellectualKitty
|
||||
//
|
||||
// Bugfixes / warning fixes
|
||||
// Jeremy Jaussaud
|
||||
// Fabian Giesen
|
||||
//
|
||||
// Version history:
|
||||
//
|
||||
// 1.01 (2021-07-11) always use large rect mode, expose STBRP__MAXVAL in public section
|
||||
// 1.00 (2019-02-25) avoid small space waste; gracefully fail too-wide rectangles
|
||||
// 0.99 (2019-02-07) warning fixes
|
||||
// 0.11 (2017-03-03) return packing success/fail result
|
||||
// 0.10 (2016-10-25) remove cast-away-const to avoid warnings
|
||||
// 0.09 (2016-08-27) fix compiler warnings
|
||||
// 0.08 (2015-09-13) really fix bug with empty rects (w=0 or h=0)
|
||||
// 0.07 (2015-09-13) fix bug with empty rects (w=0 or h=0)
|
||||
// 0.06 (2015-04-15) added STBRP_SORT to allow replacing qsort
|
||||
// 0.05: added STBRP_ASSERT to allow replacing assert
|
||||
// 0.04: fixed minor bug in STBRP_LARGE_RECTS support
|
||||
// 0.01: initial release
|
||||
//
|
||||
// LICENSE
|
||||
//
|
||||
// See end of file for license information.
|
||||
|
||||
//////////////////////////////////////////////////////////////////////////////
|
||||
//
|
||||
// INCLUDE SECTION
|
||||
//
|
||||
|
||||
#ifndef STB_INCLUDE_STB_RECT_PACK_H
|
||||
#define STB_INCLUDE_STB_RECT_PACK_H
|
||||
|
||||
#define STB_RECT_PACK_VERSION 1
|
||||
|
||||
#ifdef STBRP_STATIC
|
||||
#define STBRP_DEF static
|
||||
#else
|
||||
#define STBRP_DEF extern
|
||||
#endif
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
typedef struct stbrp_context stbrp_context;
|
||||
typedef struct stbrp_node stbrp_node;
|
||||
typedef struct stbrp_rect stbrp_rect;
|
||||
|
||||
typedef int stbrp_coord;
|
||||
|
||||
#define STBRP__MAXVAL 0x7fffffff
|
||||
// Mostly for internal use, but this is the maximum supported coordinate value.
|
||||
|
||||
STBRP_DEF int stbrp_pack_rects (stbrp_context *context, stbrp_rect *rects, int num_rects);
|
||||
// Assign packed locations to rectangles. The rectangles are of type
|
||||
// 'stbrp_rect' defined below, stored in the array 'rects', and there
|
||||
// are 'num_rects' many of them.
|
||||
//
|
||||
// Rectangles which are successfully packed have the 'was_packed' flag
|
||||
// set to a non-zero value and 'x' and 'y' store the minimum location
|
||||
// on each axis (i.e. bottom-left in cartesian coordinates, top-left
|
||||
// if you imagine y increasing downwards). Rectangles which do not fit
|
||||
// have the 'was_packed' flag set to 0.
|
||||
//
|
||||
// You should not try to access the 'rects' array from another thread
|
||||
// while this function is running, as the function temporarily reorders
|
||||
// the array while it executes.
|
||||
//
|
||||
// To pack into another rectangle, you need to call stbrp_init_target
|
||||
// again. To continue packing into the same rectangle, you can call
|
||||
// this function again. Calling this multiple times with multiple rect
|
||||
// arrays will probably produce worse packing results than calling it
|
||||
// a single time with the full rectangle array, but the option is
|
||||
// available.
|
||||
//
|
||||
// The function returns 1 if all of the rectangles were successfully
|
||||
// packed and 0 otherwise.
|
||||
|
||||
struct stbrp_rect
|
||||
{
|
||||
// reserved for your use:
|
||||
int id;
|
||||
|
||||
// input:
|
||||
stbrp_coord w, h;
|
||||
|
||||
// output:
|
||||
stbrp_coord x, y;
|
||||
int was_packed; // non-zero if valid packing
|
||||
|
||||
}; // 16 bytes, nominally
|
||||
|
||||
|
||||
STBRP_DEF void stbrp_init_target (stbrp_context *context, int width, int height, stbrp_node *nodes, int num_nodes);
|
||||
// Initialize a rectangle packer to:
|
||||
// pack a rectangle that is 'width' by 'height' in dimensions
|
||||
// using temporary storage provided by the array 'nodes', which is 'num_nodes' long
|
||||
//
|
||||
// You must call this function every time you start packing into a new target.
|
||||
//
|
||||
// There is no "shutdown" function. The 'nodes' memory must stay valid for
|
||||
// the following stbrp_pack_rects() call (or calls), but can be freed after
|
||||
// the call (or calls) finish.
|
||||
//
|
||||
// Note: to guarantee best results, either:
|
||||
// 1. make sure 'num_nodes' >= 'width'
|
||||
// or 2. call stbrp_allow_out_of_mem() defined below with 'allow_out_of_mem = 1'
|
||||
//
|
||||
// If you don't do either of the above things, widths will be quantized to multiples
|
||||
// of small integers to guarantee the algorithm doesn't run out of temporary storage.
|
||||
//
|
||||
// If you do #2, then the non-quantized algorithm will be used, but the algorithm
|
||||
// may run out of temporary storage and be unable to pack some rectangles.
|
||||
|
||||
STBRP_DEF void stbrp_setup_allow_out_of_mem (stbrp_context *context, int allow_out_of_mem);
|
||||
// Optionally call this function after init but before doing any packing to
|
||||
// change the handling of the out-of-temp-memory scenario, described above.
|
||||
// If you call init again, this will be reset to the default (false).
|
||||
|
||||
|
||||
STBRP_DEF void stbrp_setup_heuristic (stbrp_context *context, int heuristic);
|
||||
// Optionally select which packing heuristic the library should use. Different
|
||||
// heuristics will produce better/worse results for different data sets.
|
||||
// If you call init again, this will be reset to the default.
|
||||
|
||||
enum
|
||||
{
|
||||
STBRP_HEURISTIC_Skyline_default=0,
|
||||
STBRP_HEURISTIC_Skyline_BL_sortHeight = STBRP_HEURISTIC_Skyline_default,
|
||||
STBRP_HEURISTIC_Skyline_BF_sortHeight
|
||||
};
|
||||
|
||||
|
||||
//////////////////////////////////////////////////////////////////////////////
|
||||
//
|
||||
// the details of the following structures don't matter to you, but they must
|
||||
// be visible so you can handle the memory allocations for them
|
||||
|
||||
struct stbrp_node
|
||||
{
|
||||
stbrp_coord x,y;
|
||||
stbrp_node *next;
|
||||
};
|
||||
|
||||
struct stbrp_context
|
||||
{
|
||||
int width;
|
||||
int height;
|
||||
int align;
|
||||
int init_mode;
|
||||
int heuristic;
|
||||
int num_nodes;
|
||||
stbrp_node *active_head;
|
||||
stbrp_node *free_head;
|
||||
stbrp_node extra[2]; // we allocate two extra nodes so optimal user-node-count is 'width' not 'width+2'
|
||||
};
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif
|
||||
|
||||
//////////////////////////////////////////////////////////////////////////////
|
||||
//
|
||||
// IMPLEMENTATION SECTION
|
||||
//
|
||||
|
||||
#ifdef STB_RECT_PACK_IMPLEMENTATION
|
||||
#ifndef STBRP_SORT
|
||||
#include <stdlib.h>
|
||||
#define STBRP_SORT qsort
|
||||
#endif
|
||||
|
||||
#ifndef STBRP_ASSERT
|
||||
#include <assert.h>
|
||||
#define STBRP_ASSERT assert
|
||||
#endif
|
||||
|
||||
#ifdef _MSC_VER
|
||||
#define STBRP__NOTUSED(v) (void)(v)
|
||||
#define STBRP__CDECL __cdecl
|
||||
#else
|
||||
#define STBRP__NOTUSED(v) (void)sizeof(v)
|
||||
#define STBRP__CDECL
|
||||
#endif
|
||||
|
||||
enum
|
||||
{
|
||||
STBRP__INIT_skyline = 1
|
||||
};
|
||||
|
||||
STBRP_DEF void stbrp_setup_heuristic(stbrp_context *context, int heuristic)
|
||||
{
|
||||
switch (context->init_mode) {
|
||||
case STBRP__INIT_skyline:
|
||||
STBRP_ASSERT(heuristic == STBRP_HEURISTIC_Skyline_BL_sortHeight || heuristic == STBRP_HEURISTIC_Skyline_BF_sortHeight);
|
||||
context->heuristic = heuristic;
|
||||
break;
|
||||
default:
|
||||
STBRP_ASSERT(0);
|
||||
}
|
||||
}
|
||||
|
||||
STBRP_DEF void stbrp_setup_allow_out_of_mem(stbrp_context *context, int allow_out_of_mem)
|
||||
{
|
||||
if (allow_out_of_mem)
|
||||
// if it's ok to run out of memory, then don't bother aligning them;
|
||||
// this gives better packing, but may fail due to OOM (even though
|
||||
// the rectangles easily fit). @TODO a smarter approach would be to only
|
||||
// quantize once we've hit OOM, then we could get rid of this parameter.
|
||||
context->align = 1;
|
||||
else {
|
||||
// if it's not ok to run out of memory, then quantize the widths
|
||||
// so that num_nodes is always enough nodes.
|
||||
//
|
||||
// I.e. num_nodes * align >= width
|
||||
// align >= width / num_nodes
|
||||
// align = ceil(width/num_nodes)
|
||||
|
||||
context->align = (context->width + context->num_nodes-1) / context->num_nodes;
|
||||
}
|
||||
}
|
||||
|
||||
STBRP_DEF void stbrp_init_target(stbrp_context *context, int width, int height, stbrp_node *nodes, int num_nodes)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i=0; i < num_nodes-1; ++i)
|
||||
nodes[i].next = &nodes[i+1];
|
||||
nodes[i].next = NULL;
|
||||
context->init_mode = STBRP__INIT_skyline;
|
||||
context->heuristic = STBRP_HEURISTIC_Skyline_default;
|
||||
context->free_head = &nodes[0];
|
||||
context->active_head = &context->extra[0];
|
||||
context->width = width;
|
||||
context->height = height;
|
||||
context->num_nodes = num_nodes;
|
||||
stbrp_setup_allow_out_of_mem(context, 0);
|
||||
|
||||
// node 0 is the full width, node 1 is the sentinel (lets us not store width explicitly)
|
||||
context->extra[0].x = 0;
|
||||
context->extra[0].y = 0;
|
||||
context->extra[0].next = &context->extra[1];
|
||||
context->extra[1].x = (stbrp_coord) width;
|
||||
context->extra[1].y = (1<<30);
|
||||
context->extra[1].next = NULL;
|
||||
}
|
||||
|
||||
// find minimum y position if it starts at x1
|
||||
static int stbrp__skyline_find_min_y(stbrp_context *c, stbrp_node *first, int x0, int width, int *pwaste)
|
||||
{
|
||||
stbrp_node *node = first;
|
||||
int x1 = x0 + width;
|
||||
int min_y, visited_width, waste_area;
|
||||
|
||||
STBRP__NOTUSED(c);
|
||||
|
||||
STBRP_ASSERT(first->x <= x0);
|
||||
|
||||
#if 0
|
||||
// skip in case we're past the node
|
||||
while (node->next->x <= x0)
|
||||
++node;
|
||||
#else
|
||||
STBRP_ASSERT(node->next->x > x0); // we ended up handling this in the caller for efficiency
|
||||
#endif
|
||||
|
||||
STBRP_ASSERT(node->x <= x0);
|
||||
|
||||
min_y = 0;
|
||||
waste_area = 0;
|
||||
visited_width = 0;
|
||||
while (node->x < x1) {
|
||||
if (node->y > min_y) {
|
||||
// raise min_y higher.
|
||||
// we've accounted for all waste up to min_y,
|
||||
// but we'll now add more waste for everything we've visted
|
||||
waste_area += visited_width * (node->y - min_y);
|
||||
min_y = node->y;
|
||||
// the first time through, visited_width might be reduced
|
||||
if (node->x < x0)
|
||||
visited_width += node->next->x - x0;
|
||||
else
|
||||
visited_width += node->next->x - node->x;
|
||||
} else {
|
||||
// add waste area
|
||||
int under_width = node->next->x - node->x;
|
||||
if (under_width + visited_width > width)
|
||||
under_width = width - visited_width;
|
||||
waste_area += under_width * (min_y - node->y);
|
||||
visited_width += under_width;
|
||||
}
|
||||
node = node->next;
|
||||
}
|
||||
|
||||
*pwaste = waste_area;
|
||||
return min_y;
|
||||
}
|
||||
|
||||
typedef struct
|
||||
{
|
||||
int x,y;
|
||||
stbrp_node **prev_link;
|
||||
} stbrp__findresult;
|
||||
|
||||
static stbrp__findresult stbrp__skyline_find_best_pos(stbrp_context *c, int width, int height)
{
   int best_waste = (1<<30), best_x, best_y = (1 << 30);
   stbrp__findresult fr;
   stbrp_node **prev, *node, *tail, **best = NULL;

   // align to multiple of c->align
   width = (width + c->align - 1);
   width -= width % c->align;
   STBRP_ASSERT(width % c->align == 0);

   // if it can't possibly fit, bail immediately
   if (width > c->width || height > c->height) {
      fr.prev_link = NULL;
      fr.x = fr.y = 0;
      return fr;
   }

   node = c->active_head;
   prev = &c->active_head;
   while (node->x + width <= c->width) {
      int y,waste;
      y = stbrp__skyline_find_min_y(c, node, node->x, width, &waste);
      if (c->heuristic == STBRP_HEURISTIC_Skyline_BL_sortHeight) { // actually just want to test BL
         // bottom left
         if (y < best_y) {
            best_y = y;
            best = prev;
         }
      } else {
         // best-fit
         if (y + height <= c->height) {
            // can only use it if it fits vertically
            if (y < best_y || (y == best_y && waste < best_waste)) {
               best_y = y;
               best_waste = waste;
               best = prev;
            }
         }
      }
      prev = &node->next;
      node = node->next;
   }

   best_x = (best == NULL) ? 0 : (*best)->x;

   // if doing best-fit (BF), we also have to try aligning right edge to each node position
   //
   // e.g. if fitting
   //
   //     ____________________
   //    |____________________|
   //
   // into
   //
   //   |                         |
   //   |             ____________|
   //   |____________|
   //
   // then right-aligned reduces waste, but bottom-left (BL) always chooses left-aligned
   //
   // This makes BF take about 2x the time

   if (c->heuristic == STBRP_HEURISTIC_Skyline_BF_sortHeight) {
      tail = c->active_head;
      node = c->active_head;
      prev = &c->active_head;
      // find first node that's admissible
      while (tail->x < width)
         tail = tail->next;
      while (tail) {
         int xpos = tail->x - width;
         int y,waste;
         STBRP_ASSERT(xpos >= 0);
         // find the left position that matches this
         while (node->next->x <= xpos) {
            prev = &node->next;
            node = node->next;
         }
         STBRP_ASSERT(node->next->x > xpos && node->x <= xpos);
         y = stbrp__skyline_find_min_y(c, node, xpos, width, &waste);
         if (y + height <= c->height) {
            if (y <= best_y) {
               if (y < best_y || waste < best_waste || (waste==best_waste && xpos < best_x)) {
                  best_x = xpos;
                  STBRP_ASSERT(y <= best_y);
                  best_y = y;
                  best_waste = waste;
                  best = prev;
               }
            }
         }
         tail = tail->next;
      }
   }

   fr.prev_link = best;
   fr.x = best_x;
   fr.y = best_y;
   return fr;
}

static stbrp__findresult stbrp__skyline_pack_rectangle(stbrp_context *context, int width, int height)
{
   // find best position according to heuristic
   stbrp__findresult res = stbrp__skyline_find_best_pos(context, width, height);
   stbrp_node *node, *cur;

   // bail if:
   //    1. it failed
   //    2. the best node doesn't fit (we don't always check this)
   //    3. we're out of memory
   if (res.prev_link == NULL || res.y + height > context->height || context->free_head == NULL) {
      res.prev_link = NULL;
      return res;
   }

   // on success, create new node
   node = context->free_head;
   node->x = (stbrp_coord) res.x;
   node->y = (stbrp_coord) (res.y + height);

   context->free_head = node->next;

   // insert the new node into the right starting point, and
   // let 'cur' point to the remaining nodes needing to be
   // stitched back in

   cur = *res.prev_link;
   if (cur->x < res.x) {
      // preserve the existing one, so start testing with the next one
      stbrp_node *next = cur->next;
      cur->next = node;
      cur = next;
   } else {
      *res.prev_link = node;
   }

   // from here, traverse cur and free the nodes, until we get to one
   // that shouldn't be freed
   while (cur->next && cur->next->x <= res.x + width) {
      stbrp_node *next = cur->next;
      // move the current node to the free list
      cur->next = context->free_head;
      context->free_head = cur;
      cur = next;
   }

   // stitch the list back in
   node->next = cur;

   if (cur->x < res.x + width)
      cur->x = (stbrp_coord) (res.x + width);

#ifdef _DEBUG
   cur = context->active_head;
   while (cur->x < context->width) {
      STBRP_ASSERT(cur->x < cur->next->x);
      cur = cur->next;
   }
   STBRP_ASSERT(cur->next == NULL);

   {
      int count=0;
      cur = context->active_head;
      while (cur) {
         cur = cur->next;
         ++count;
      }
      cur = context->free_head;
      while (cur) {
         cur = cur->next;
         ++count;
      }
      STBRP_ASSERT(count == context->num_nodes+2);
   }
#endif

   return res;
}

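// qsort comparators: pack tallest (then widest) rects first -- placing large rects
// early tends to leave less unusable space; this is a heuristic, not a guarantee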
static int STBRP__CDECL rect_height_compare(const void *a, const void *b)
{
   const stbrp_rect *p = (const stbrp_rect *) a;
   const stbrp_rect *q = (const stbrp_rect *) b;
   if (p->h > q->h)
      return -1;
   if (p->h < q->h)
      return  1;
   return (p->w > q->w) ? -1 : (p->w < q->w);
}

static int STBRP__CDECL rect_original_order(const void *a, const void *b)
{
   const stbrp_rect *p = (const stbrp_rect *) a;
   const stbrp_rect *q = (const stbrp_rect *) b;
   return (p->was_packed < q->was_packed) ? -1 : (p->was_packed > q->was_packed);
}

STBRP_DEF int stbrp_pack_rects(stbrp_context *context, stbrp_rect *rects, int num_rects)
{
   int i, all_rects_packed = 1;

   // we use the 'was_packed' field internally to allow sorting/unsorting
   for (i=0; i < num_rects; ++i) {
      rects[i].was_packed = i;
   }

   // sort according to heuristic
   STBRP_SORT(rects, num_rects, sizeof(rects[0]), rect_height_compare);

   for (i=0; i < num_rects; ++i) {
      if (rects[i].w == 0 || rects[i].h == 0) {
         rects[i].x = rects[i].y = 0;  // empty rect needs no space
      } else {
         stbrp__findresult fr = stbrp__skyline_pack_rectangle(context, rects[i].w, rects[i].h);
         if (fr.prev_link) {
            rects[i].x = (stbrp_coord) fr.x;
            rects[i].y = (stbrp_coord) fr.y;
         } else {
            rects[i].x = rects[i].y = STBRP__MAXVAL;
         }
      }
   }

   // unsort
   STBRP_SORT(rects, num_rects, sizeof(rects[0]), rect_original_order);

   // set was_packed flags and all_rects_packed status
   for (i=0; i < num_rects; ++i) {
      rects[i].was_packed = !(rects[i].x == STBRP__MAXVAL && rects[i].y == STBRP__MAXVAL);
      if (!rects[i].was_packed)
         all_rects_packed = 0;
   }

   // return the all_rects_packed status
   return all_rects_packed;
}
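// Illustrative usage sketch (not part of the library): pack two rects into a
// hypothetical 512x512 atlas. The sizes and ids below are made up for the example.
//
//    stbrp_context ctx;
//    stbrp_node    nodes[512];                              // >= target width is a good default
//    stbrp_rect    rects[2] = { {0, 10, 20}, {1, 30, 40} }; // id, w, h
//    stbrp_init_target(&ctx, 512, 512, nodes, 512);
//    if (stbrp_pack_rects(&ctx, rects, 2)) {
//       // all rects packed; rects[i].x / rects[i].y hold the positions
//    }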
#endif

/*
------------------------------------------------------------------------------
This software is available under 2 licenses -- choose whichever you prefer.
------------------------------------------------------------------------------
ALTERNATIVE A - MIT License
Copyright (c) 2017 Sean Barrett
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
------------------------------------------------------------------------------
ALTERNATIVE B - Public Domain (www.unlicense.org)
This is free and unencumbered software released into the public domain.
Anyone is free to copy, modify, publish, use, compile, sell, or distribute this
software, either in source code form or as a compiled binary, for any purpose,
commercial or non-commercial, and by any means.
In jurisdictions that recognize copyright laws, the author or authors of this
software dedicate any and all copyright interest in the software to the public
domain. We make this dedication for the benefit of the public at large and to
the detriment of our heirs and successors. We intend this dedication to be an
overt act of relinquishment in perpetuity of all present and future rights to
this software under copyright law.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
------------------------------------------------------------------------------
*/

@@ -1,6 +1,6 @@
// Test camera capture with colorspace conversion
var camera = use('camera');
var surface = use('sdl/surface');
var surface = use('sdl3/surface');
var json = use('json');

// Get first camera

@@ -7,7 +7,7 @@ var math = use('math/radians')
input.watch($self)

// Create SDL video actor
var video_actor = use('sdl/video');
var video_actor = use('sdl3/video');

var window_id = null;
var renderer_id = null;

@@ -1,5 +1,5 @@
// Test SDL_Surface module
var Surface = use('sdl/surface');
var Surface = use('sdl3/surface');

// Test creating a surface
var surf = new Surface({width: 100, height: 100});

@@ -1,5 +1,5 @@
// Test surface colorspace conversion
var surface = use('sdl/surface');
var surface = use('sdl3/surface');
var json = use('json');

// Create a test surface

@@ -4,11 +4,11 @@ var graphics
var os = use('os');
var input = use('input')
var json = use('json')
var surface = use('sdl/surface')
var surface = use('sdl3/surface')
input.watch($self)

// Create SDL video actor
var video_actor = use('sdl/video');
var video_actor = use('sdl3/video');
var camera = use('camera')

var window_id = null;