Grayscale quantization and fixing some defaults to support RGB565
This commit is contained in:
@@ -4,149 +4,160 @@
|
||||
#include <string.h>
|
||||
|
||||
// LVGL's LodePNG memory optimization
// Instead of standard heap allocations, which fragment quickly and crash on the
// ESP32, we allocate a single large buffer in PSRAM and just bump a pointer
// during encode!
|
||||
|
||||
// Tag used for all ESP_LOGx output from this allocator.
static const char *kTagLodeAlloc = "LODE_ALLOC";
|
||||
|
||||
// 1MB buffer for LodePNG encoding intermediate state.
// A typical 800x480 grayscale PNG should compress to ~50-100KB, but the dynamic
// window matching and filtering algorithms need a good amount of scratch space.
// We can tune this back up to 2MB if 1MB proves too small; PSRAM provides 8MB.
#define LODEPNG_ALLOC_POOL_SIZE (1 * 1024 * 1024)
|
||||
|
||||
// Backing storage for the bump allocator (PSRAM preferred; see init below).
static uint8_t *s_lodepng_pool = nullptr;
// Current bump offset into the pool, in bytes.
static size_t s_lodepng_pool_used = 0;
|
||||
|
||||
void lodepng_allocator_init()
|
||||
{
|
||||
if (s_lodepng_pool != nullptr) return;
|
||||
if (s_lodepng_pool != nullptr)
|
||||
return;
|
||||
|
||||
ESP_LOGI(kTagLodeAlloc, "Allocating %d bytes in PSRAM for LodePNG bump allocator...", LODEPNG_ALLOC_POOL_SIZE);
|
||||
|
||||
// SPIRAM fallback to internal if someone tests without a PSRAM chip
|
||||
s_lodepng_pool = (uint8_t*)heap_caps_malloc(LODEPNG_ALLOC_POOL_SIZE, MALLOC_CAP_SPIRAM);
|
||||
if (!s_lodepng_pool)
|
||||
{
|
||||
s_lodepng_pool = (uint8_t*)heap_caps_malloc(LODEPNG_ALLOC_POOL_SIZE, MALLOC_CAP_DEFAULT);
|
||||
}
|
||||
ESP_LOGI(kTagLodeAlloc,
|
||||
"Allocating %d bytes in PSRAM for LodePNG bump allocator...",
|
||||
LODEPNG_ALLOC_POOL_SIZE);
|
||||
|
||||
if (!s_lodepng_pool)
|
||||
{
|
||||
ESP_LOGE(kTagLodeAlloc, "CRITICAL: Failed to allocate LodePNG PSRAM pool!");
|
||||
}
|
||||
// SPIRAM fallback to internal if someone tests without a PSRAM chip
|
||||
s_lodepng_pool =
|
||||
(uint8_t *)heap_caps_malloc(LODEPNG_ALLOC_POOL_SIZE, MALLOC_CAP_SPIRAM);
|
||||
if (!s_lodepng_pool)
|
||||
{
|
||||
s_lodepng_pool = (uint8_t *)heap_caps_malloc(LODEPNG_ALLOC_POOL_SIZE,
|
||||
MALLOC_CAP_DEFAULT);
|
||||
}
|
||||
|
||||
if (!s_lodepng_pool)
|
||||
{
|
||||
ESP_LOGE(kTagLodeAlloc, "CRITICAL: Failed to allocate LodePNG PSRAM pool!");
|
||||
}
|
||||
}
|
||||
|
||||
void lodepng_allocator_reset()
|
||||
{
|
||||
s_lodepng_pool_used = 0;
|
||||
}
|
||||
void lodepng_allocator_reset() { s_lodepng_pool_used = 0; }
|
||||
|
||||
void lodepng_allocator_free()
|
||||
{
|
||||
if (s_lodepng_pool)
|
||||
{
|
||||
free(s_lodepng_pool);
|
||||
s_lodepng_pool = nullptr;
|
||||
}
|
||||
s_lodepng_pool_used = 0;
|
||||
if (s_lodepng_pool)
|
||||
{
|
||||
free(s_lodepng_pool);
|
||||
s_lodepng_pool = nullptr;
|
||||
}
|
||||
s_lodepng_pool_used = 0;
|
||||
}
|
||||
|
||||
// ----------------------------------------------------
|
||||
// Custom Allocators injected into lodepng.c
|
||||
// ----------------------------------------------------
|
||||
|
||||
// To support realloc properly, we prefix each allocation with a header
// storing the size. alignas(8) pads the struct to 8 bytes even on 32-bit
// targets (where sizeof(size_t) == 4), so user pointers — pool base + offset +
// sizeof(AllocHeader) — stay 8-byte aligned, matching the 8-byte size rounding
// done in lodepng_custom_malloc.
struct alignas(8) AllocHeader
{
  size_t size; // exact requested size (not the rounded size); used as the
               // memcpy bound during realloc
};
|
||||
|
||||
void* lodepng_custom_malloc(size_t size)
|
||||
void *lodepng_custom_malloc(size_t size)
|
||||
{
|
||||
if (!s_lodepng_pool)
|
||||
{
|
||||
ESP_LOGE(kTagLodeAlloc, "lodepng_malloc called before lodepng_allocator_init!");
|
||||
return nullptr;
|
||||
}
|
||||
if (!s_lodepng_pool)
|
||||
{
|
||||
ESP_LOGE(kTagLodeAlloc,
|
||||
"lodepng_malloc called before lodepng_allocator_init!");
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
// Align size to 8 bytes to avoid unaligned access faults
|
||||
size_t aligned_size = (size + 7) & ~7;
|
||||
size_t total_alloc = sizeof(AllocHeader) + aligned_size;
|
||||
// Align size to 8 bytes to avoid unaligned access faults
|
||||
size_t aligned_size = (size + 7) & ~7;
|
||||
size_t total_alloc = sizeof(AllocHeader) + aligned_size;
|
||||
|
||||
if (s_lodepng_pool_used + total_alloc > LODEPNG_ALLOC_POOL_SIZE)
|
||||
{
|
||||
ESP_LOGE(kTagLodeAlloc, "LodePNG pool exhausted! Requested: %zu, Used: %zu, Total: %d", size, s_lodepng_pool_used, LODEPNG_ALLOC_POOL_SIZE);
|
||||
return nullptr;
|
||||
}
|
||||
if (s_lodepng_pool_used + total_alloc > LODEPNG_ALLOC_POOL_SIZE)
|
||||
{
|
||||
ESP_LOGE(kTagLodeAlloc,
|
||||
"LodePNG pool exhausted! Requested: %zu, Used: %zu, Total: %d",
|
||||
size, s_lodepng_pool_used, LODEPNG_ALLOC_POOL_SIZE);
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
// Grab pointer and bump
|
||||
uint8_t* ptr = s_lodepng_pool + s_lodepng_pool_used;
|
||||
s_lodepng_pool_used += total_alloc;
|
||||
// Grab pointer and bump
|
||||
uint8_t *ptr = s_lodepng_pool + s_lodepng_pool_used;
|
||||
s_lodepng_pool_used += total_alloc;
|
||||
|
||||
// Write header
|
||||
AllocHeader* header = (AllocHeader*)ptr;
|
||||
header->size = size; // We store exact size for realloc memcpy bounds
|
||||
// Write header
|
||||
AllocHeader *header = (AllocHeader *)ptr;
|
||||
header->size = size; // We store exact size for realloc memcpy bounds
|
||||
|
||||
// Return pointer right after header
|
||||
return ptr + sizeof(AllocHeader);
|
||||
// Return pointer right after header
|
||||
return ptr + sizeof(AllocHeader);
|
||||
}
|
||||
|
||||
void* lodepng_custom_realloc(void* ptr, size_t new_size)
|
||||
void *lodepng_custom_realloc(void *ptr, size_t new_size)
|
||||
{
|
||||
if (!ptr)
|
||||
if (!ptr)
|
||||
{
|
||||
return lodepng_custom_malloc(new_size);
|
||||
}
|
||||
|
||||
if (new_size == 0)
|
||||
{
|
||||
lodepng_custom_free(ptr);
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
// Get original header
|
||||
uint8_t *orig_ptr = (uint8_t *)ptr - sizeof(AllocHeader);
|
||||
AllocHeader *header = (AllocHeader *)orig_ptr;
|
||||
|
||||
size_t old_size = header->size;
|
||||
if (new_size <= old_size)
|
||||
{
|
||||
// Don't shrink to save time, bump allocator can't reclaim it easily anyway.
|
||||
return ptr;
|
||||
}
|
||||
|
||||
// Let's see if this ptr was the *very last* allocation.
|
||||
// If so, we can just expand it in place!
|
||||
size_t old_aligned_size = (old_size + 7) & ~7;
|
||||
if (orig_ptr + sizeof(AllocHeader) + old_aligned_size ==
|
||||
s_lodepng_pool + s_lodepng_pool_used)
|
||||
{
|
||||
// We are at the end! Just bump further!
|
||||
size_t new_aligned_size = (new_size + 7) & ~7;
|
||||
size_t size_diff = new_aligned_size - old_aligned_size;
|
||||
|
||||
if (s_lodepng_pool_used + size_diff > LODEPNG_ALLOC_POOL_SIZE)
|
||||
{
|
||||
return lodepng_custom_malloc(new_size);
|
||||
}
|
||||
|
||||
if (new_size == 0)
|
||||
{
|
||||
lodepng_custom_free(ptr);
|
||||
return nullptr;
|
||||
ESP_LOGE(kTagLodeAlloc,
|
||||
"LodePNG pool exhausted during in-place realloc!");
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
// Get original header
|
||||
uint8_t* orig_ptr = (uint8_t*)ptr - sizeof(AllocHeader);
|
||||
AllocHeader* header = (AllocHeader*)orig_ptr;
|
||||
s_lodepng_pool_used += size_diff;
|
||||
header->size = new_size;
|
||||
return ptr;
|
||||
}
|
||||
|
||||
size_t old_size = header->size;
|
||||
if (new_size <= old_size)
|
||||
{
|
||||
// Don't shrink to save time, bump allocator can't reclaim it easily anyway.
|
||||
return ptr;
|
||||
}
|
||||
// Otherwise, we have to copy into a new block
|
||||
void *new_ptr = lodepng_custom_malloc(new_size);
|
||||
if (new_ptr)
|
||||
{
|
||||
memcpy(new_ptr, ptr, old_size);
|
||||
}
|
||||
|
||||
// Let's see if this ptr was the *very last* allocation.
|
||||
// If so, we can just expand it in place!
|
||||
size_t old_aligned_size = (old_size + 7) & ~7;
|
||||
if (orig_ptr + sizeof(AllocHeader) + old_aligned_size == s_lodepng_pool + s_lodepng_pool_used)
|
||||
{
|
||||
// We are at the end! Just bump further!
|
||||
size_t new_aligned_size = (new_size + 7) & ~7;
|
||||
size_t size_diff = new_aligned_size - old_aligned_size;
|
||||
|
||||
if (s_lodepng_pool_used + size_diff > LODEPNG_ALLOC_POOL_SIZE)
|
||||
{
|
||||
ESP_LOGE(kTagLodeAlloc, "LodePNG pool exhausted during in-place realloc!");
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
s_lodepng_pool_used += size_diff;
|
||||
header->size = new_size;
|
||||
return ptr;
|
||||
}
|
||||
|
||||
// Otherwise, we have to copy into a new block
|
||||
void* new_ptr = lodepng_custom_malloc(new_size);
|
||||
if (new_ptr)
|
||||
{
|
||||
memcpy(new_ptr, ptr, old_size);
|
||||
}
|
||||
|
||||
return new_ptr;
|
||||
return new_ptr;
|
||||
}
|
||||
|
||||
// Intentional no-op: individual blocks are never reclaimed. The whole pool is
// reused after lodepng_allocator_reset() once the API endpoint is done.
void lodepng_custom_free(void *ptr)
{
  (void)ptr;
}
|
||||
|
||||
Reference in New Issue
Block a user