Implement concurrent access. Supports well over 5 simultaneous devices — enough for the house

This commit is contained in:
2026-03-08 17:50:07 -04:00
parent 7452b1b807
commit 5335da5c29
8 changed files with 148 additions and 37 deletions

View File

@@ -4,6 +4,7 @@
import Sidebar from "./lib/Sidebar.svelte"; import Sidebar from "./lib/Sidebar.svelte";
import TaskManager from "./lib/TaskManager.svelte"; import TaskManager from "./lib/TaskManager.svelte";
import UserManager from "./lib/UserManager.svelte"; import UserManager from "./lib/UserManager.svelte";
import Spinner from "./lib/Spinner.svelte";
/** @type {'loading' | 'ok' | 'error' | 'rebooting'} */ /** @type {'loading' | 'ok' | 'error' | 'rebooting'} */
let status = $state("loading"); let status = $state("loading");
@@ -389,6 +390,8 @@
</main> </main>
</div> </div>
<Spinner />
<style> <style>
.app-layout { .app-layout {
display: flex; display: flex;

View File

@@ -0,0 +1,47 @@
<script>
// Global loading overlay: watches the shared pendingRequests counter and shows
// a full-screen spinner only when at least one HTTP request has been in flight
// for longer than 300ms (avoids flicker on fast requests).
//
// NOTE(review): App.svelte uses Svelte 5 runes ($state); this component relies
// on legacy `let` assignment reactivity for showSpinner — confirm the compiler
// is not in forced runes mode, otherwise the overlay would never appear.
// NOTE(review): onMount is imported but never used — candidate for removal.
import { pendingRequests } from './stores.js';
import { onMount, onDestroy } from 'svelte';
let showSpinner = false;
let timer;
// Subscribe to the store
const unsubscribe = pendingRequests.subscribe(count => {
if (count > 0) {
// Only show the spinner if the request takes longer than 300ms
// Guard: a timer (or an already-visible spinner, whose stale id is still
// truthy) means the 300ms debounce is already armed — don't re-arm it.
if (!timer) {
timer = setTimeout(() => {
showSpinner = true;
}, 300);
}
} else {
// Instantly hide the spinner when all requests finish
// (clearTimeout on a stale/fired id is a harmless no-op)
clearTimeout(timer);
timer = null;
showSpinner = false;
}
});
// Clean up the store subscription and any pending debounce timer so the
// spinner cannot fire after the component is torn down.
onDestroy(() => {
unsubscribe();
if (timer) clearTimeout(timer);
});
</script>
{#if showSpinner}
<!-- Full-screen modal overlay; z-[9999] keeps it above all app chrome -->
<div class="fixed inset-0 z-[9999] flex items-center justify-center bg-black/40 backdrop-blur-sm transition-opacity duration-300">
<div class="flex flex-col items-center p-8 bg-surface-base rounded-2xl shadow-xl border border-divider">
<!-- Loading circle animation -->
<div class="w-12 h-12 border-4 border-primary border-t-transparent rounded-full animate-spin"></div>
<p class="mt-4 text-text-primary font-medium tracking-wide animate-pulse">Communicating with Device...</p>
</div>
</div>
{/if}
<style>
/*
 * Note: 'bg-surface-base', 'border-divider', 'text-text-primary'
 * are assumed to be part of the app's global tailwind theme.
 * Adjust classes if necessary.
 */
</style>

View File

@@ -7,13 +7,26 @@
*/ */
const API_BASE = import.meta.env.VITE_API_BASE || ''; const API_BASE = import.meta.env.VITE_API_BASE || '';
import { pendingRequests } from './stores.js';
/**
 * Wrapper around fetch that tracks the number of pending requests globally.
 *
 * Increments the shared `pendingRequests` store before the request starts and
 * decrements it once the request settles (success or failure). The decrement
 * is clamped at zero so a stray extra decrement can never drive the counter
 * negative and wedge the spinner on.
 *
 * NOTE(review): the counter is decremented when the response *headers* arrive,
 * not when the body has been consumed — confirm that is the intended spinner
 * lifetime for large downloads.
 *
 * @param {string} url - Request URL (same contract as fetch).
 * @param {RequestInit} [options] - Standard fetch options.
 * @returns {Promise<Response>} The fetch Response, unmodified.
 */
function trackedFetch(url, options = {}) {
	pendingRequests.update((count) => count + 1);
	const settle = () => pendingRequests.update((count) => Math.max(0, count - 1));

	return fetch(url, options).then(
		(response) => {
			settle();
			return response;
		},
		(error) => {
			settle();
			throw error;
		}
	);
}
/** /**
* Fetch system information from the ESP32. * Fetch system information from the ESP32.
* @returns {Promise<{chip: string, freeHeap: number, uptime: number, firmware: string, connection: string}>} * @returns {Promise<{chip: string, freeHeap: number, uptime: number, firmware: string, connection: string}>}
*/ */
export async function getSystemInfo() { export async function getSystemInfo() {
const res = await fetch(`${API_BASE}/api/system/info`); const res = await trackedFetch(`${API_BASE}/api/system/info`);
if (!res.ok) { if (!res.ok) {
throw new Error(`HTTP ${res.status}: ${res.statusText}`); throw new Error(`HTTP ${res.status}: ${res.statusText}`);
} }
@@ -29,7 +42,7 @@ export async function getSystemInfo() {
* @returns {Promise<{message: string}>} * @returns {Promise<{message: string}>}
*/ */
export async function reboot() { export async function reboot() {
const res = await fetch(`${API_BASE}/api/system/reboot`, { const res = await trackedFetch(`${API_BASE}/api/system/reboot`, {
method: 'POST', method: 'POST',
}); });
if (!res.ok) { if (!res.ok) {
@@ -43,7 +56,7 @@ export async function reboot() {
* @returns {Promise<{active_slot: number, active_partition: string, target_partition: string, partitions: any[], running_firmware_label: string, running_firmware_slot: number}>} * @returns {Promise<{active_slot: number, active_partition: string, target_partition: string, partitions: any[], running_firmware_label: string, running_firmware_slot: number}>}
*/ */
export async function getOTAStatus() { export async function getOTAStatus() {
const res = await fetch(`${API_BASE}/api/ota/status`); const res = await trackedFetch(`${API_BASE}/api/ota/status`);
if (!res.ok) { if (!res.ok) {
throw new Error(`HTTP ${res.status}: ${res.statusText}`); throw new Error(`HTTP ${res.status}: ${res.statusText}`);
} }
@@ -56,7 +69,7 @@ export async function getOTAStatus() {
* @returns {Promise<{status: string, message: string}>} * @returns {Promise<{status: string, message: string}>}
*/ */
export async function uploadOTAFrontend(file) { export async function uploadOTAFrontend(file) {
const res = await fetch(`${API_BASE}/api/ota/frontend`, { const res = await trackedFetch(`${API_BASE}/api/ota/frontend`, {
method: 'POST', method: 'POST',
body: file, // Send the raw file Blob/Buffer body: file, // Send the raw file Blob/Buffer
headers: { headers: {
@@ -79,7 +92,7 @@ export async function uploadOTAFrontend(file) {
* @returns {Promise<{status: string, message: string}>} * @returns {Promise<{status: string, message: string}>}
*/ */
export async function uploadOTAFirmware(file) { export async function uploadOTAFirmware(file) {
const res = await fetch(`${API_BASE}/api/ota/firmware`, { const res = await trackedFetch(`${API_BASE}/api/ota/firmware`, {
method: 'POST', method: 'POST',
body: file, body: file,
headers: { headers: {
@@ -101,7 +114,7 @@ export async function uploadOTAFirmware(file) {
* @returns {Promise<{status: string, message: string}>} * @returns {Promise<{status: string, message: string}>}
*/ */
export async function uploadOTABundle(file) { export async function uploadOTABundle(file) {
const res = await fetch(`${API_BASE}/api/ota/bundle`, { const res = await trackedFetch(`${API_BASE}/api/ota/bundle`, {
method: 'POST', method: 'POST',
body: file, body: file,
headers: { headers: {
@@ -124,7 +137,7 @@ export async function uploadOTABundle(file) {
* @returns {Promise<Array<{id: number, name: string}>>} * @returns {Promise<Array<{id: number, name: string}>>}
*/ */
export async function getUsers() { export async function getUsers() {
const res = await fetch(`${API_BASE}/api/users`); const res = await trackedFetch(`${API_BASE}/api/users`);
if (!res.ok) throw new Error(`HTTP ${res.status}: ${res.statusText}`); if (!res.ok) throw new Error(`HTTP ${res.status}: ${res.statusText}`);
return res.json(); return res.json();
} }
@@ -135,7 +148,7 @@ export async function getUsers() {
* @returns {Promise<{id: number, name: string}>} * @returns {Promise<{id: number, name: string}>}
*/ */
export async function addUser(name) { export async function addUser(name) {
const res = await fetch(`${API_BASE}/api/users`, { const res = await trackedFetch(`${API_BASE}/api/users`, {
method: 'POST', method: 'POST',
headers: { 'Content-Type': 'application/json' }, headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({ name }) body: JSON.stringify({ name })
@@ -153,7 +166,7 @@ export async function addUser(name) {
* @returns {Promise<{status: string}>} * @returns {Promise<{status: string}>}
*/ */
export async function removeUser(id) { export async function removeUser(id) {
const res = await fetch(`${API_BASE}/api/users?id=${id}`, { const res = await trackedFetch(`${API_BASE}/api/users?id=${id}`, {
method: 'DELETE' method: 'DELETE'
}); });
if (!res.ok) { if (!res.ok) {
@@ -170,7 +183,7 @@ export async function removeUser(id) {
* @returns {Promise<{status: string}>} * @returns {Promise<{status: string}>}
*/ */
export async function updateUser(id, name) { export async function updateUser(id, name) {
const res = await fetch(`${API_BASE}/api/users/update`, { const res = await trackedFetch(`${API_BASE}/api/users/update`, {
method: 'POST', method: 'POST',
headers: { 'Content-Type': 'application/json' }, headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({ id, name }) body: JSON.stringify({ id, name })
@@ -190,7 +203,7 @@ export async function updateUser(id, name) {
* @returns {Promise<Array<{id: number, user_id: number, title: string, due_date: number, completed: boolean}>>} * @returns {Promise<Array<{id: number, user_id: number, title: string, due_date: number, completed: boolean}>>}
*/ */
export async function getTasks(userId) { export async function getTasks(userId) {
const res = await fetch(`${API_BASE}/api/tasks?user_id=${userId}`); const res = await trackedFetch(`${API_BASE}/api/tasks?user_id=${userId}`);
if (!res.ok) throw new Error(`HTTP ${res.status}: ${res.statusText}`); if (!res.ok) throw new Error(`HTTP ${res.status}: ${res.statusText}`);
return res.json(); return res.json();
} }
@@ -200,7 +213,7 @@ export async function getTasks(userId) {
* @returns {Promise<{users: Array<{id: number, name: string, tasks: Array}>}>} * @returns {Promise<{users: Array<{id: number, name: string, tasks: Array}>}>}
*/ */
export async function getUpcomingTasks() { export async function getUpcomingTasks() {
const res = await fetch(`${API_BASE}/api/tasks/upcoming`); const res = await trackedFetch(`${API_BASE}/api/tasks/upcoming`);
if (!res.ok) throw new Error(`HTTP ${res.status}: ${res.statusText}`); if (!res.ok) throw new Error(`HTTP ${res.status}: ${res.statusText}`);
return res.json(); return res.json();
} }
@@ -213,7 +226,7 @@ export async function getUpcomingTasks() {
* @returns {Promise<{id: number, user_id: number, title: string, due_date: number, completed: boolean}>} * @returns {Promise<{id: number, user_id: number, title: string, due_date: number, completed: boolean}>}
*/ */
export async function addTask(userId, title, dueDate) { export async function addTask(userId, title, dueDate) {
const res = await fetch(`${API_BASE}/api/tasks`, { const res = await trackedFetch(`${API_BASE}/api/tasks`, {
method: 'POST', method: 'POST',
headers: { 'Content-Type': 'application/json' }, headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({ user_id: userId, title, due_date: dueDate }) body: JSON.stringify({ user_id: userId, title, due_date: dueDate })
@@ -232,7 +245,7 @@ export async function addTask(userId, title, dueDate) {
* @returns {Promise<{status: string}>} * @returns {Promise<{status: string}>}
*/ */
export async function updateTask(id, fields) { export async function updateTask(id, fields) {
const res = await fetch(`${API_BASE}/api/tasks/update`, { const res = await trackedFetch(`${API_BASE}/api/tasks/update`, {
method: 'POST', method: 'POST',
headers: { 'Content-Type': 'application/json' }, headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({ id, ...fields }) body: JSON.stringify({ id, ...fields })
@@ -250,7 +263,7 @@ export async function updateTask(id, fields) {
* @returns {Promise<{status: string}>} * @returns {Promise<{status: string}>}
*/ */
export async function deleteTask(id) { export async function deleteTask(id) {
const res = await fetch(`${API_BASE}/api/tasks?id=${id}`, { const res = await trackedFetch(`${API_BASE}/api/tasks?id=${id}`, {
method: 'DELETE' method: 'DELETE'
}); });
if (!res.ok) { if (!res.ok) {

View File

@@ -0,0 +1,3 @@
// Global counter of in-flight HTTP requests. Incremented/decremented by
// trackedFetch in api.js; Spinner.svelte subscribes to it to show a
// loading overlay whenever the count is greater than zero.
import { writable } from 'svelte/store';
export const pendingRequests = writable(0);

View File

@@ -28,11 +28,53 @@ constexpr uint8 kGZ_Extension_Length = sizeof(".gz") - 1;
#define FILE_PATH_MAX (ESP_VFS_PATH_MAX + 128) #define FILE_PATH_MAX (ESP_VFS_PATH_MAX + 128)
#define SCRATCH_BUFSIZE 4096 #define SCRATCH_BUFSIZE 4096
#define MAX_SCRATCH_BUFFERS 10
typedef struct typedef struct
{ {
char scratch[SCRATCH_BUFSIZE]; char *buffers[MAX_SCRATCH_BUFFERS];
} http_server_data_t; bool in_use[MAX_SCRATCH_BUFFERS];
} scratch_pool_t;
static scratch_pool_t global_scratch_pool = {};
// Acquire a free SCRATCH_BUFSIZE (4 KB) buffer from the global pool.
// Slots are heap-allocated lazily the first time they are handed out and
// kept for reuse afterwards, so steady-state serving does no malloc/free.
// Returns NULL when the heap allocation fails or when all
// MAX_SCRATCH_BUFFERS slots are currently in use.
//
// NOTE(review): the pool state is read/written without a lock — safe only
// while all callers run on the single esp_http_server task; confirm no
// other task ever calls this.
char *get_scratch_buffer()
{
    for (int slot = 0; slot < MAX_SCRATCH_BUFFERS; slot++)
    {
        if (global_scratch_pool.in_use[slot])
            continue;

        char *buf = global_scratch_pool.buffers[slot];
        if (buf == NULL)
        {
            // First use of this slot: allocate its backing storage once.
            buf = (char *)malloc(SCRATCH_BUFSIZE);
            if (buf == NULL)
            {
                ESP_LOGE(TAG, "Failed to allocate scratch buffer from heap!");
                return NULL;
            }
            global_scratch_pool.buffers[slot] = buf;
        }

        global_scratch_pool.in_use[slot] = true;
        return buf;
    }

    ESP_LOGE(TAG, "All scratch buffers in use! Increase MAX_SCRATCH_BUFFERS");
    return NULL;
}
// Return a buffer previously obtained from get_scratch_buffer() to the pool.
// The backing memory is NOT freed — the slot is only marked available so the
// next request can reuse it. NULL is accepted and ignored; a pointer that
// does not belong to the pool is logged as an error and left untouched.
void free_scratch_buffer(char *buffer)
{
    if (buffer == NULL)
        return;

    for (int slot = 0; slot < MAX_SCRATCH_BUFFERS; slot++)
    {
        if (global_scratch_pool.buffers[slot] != buffer)
            continue;
        global_scratch_pool.in_use[slot] = false;
        return;
    }

    ESP_LOGE(TAG, "Attempted to free unknown scratch buffer!");
}
#ifdef CONFIG_CALENDINK_DEPLOY_WEB_PAGES #ifdef CONFIG_CALENDINK_DEPLOY_WEB_PAGES
// Set HTTP response content type according to file extension // Set HTTP response content type according to file extension
@@ -138,8 +180,14 @@ internal esp_err_t static_file_handler(httpd_req_t *req)
set_content_type_from_file(req, filepath); set_content_type_from_file(req, filepath);
http_server_data_t *rest_context = (http_server_data_t *)req->user_ctx; char *chunk = get_scratch_buffer();
char *chunk = rest_context->scratch; if (chunk == NULL)
{
close(fd);
httpd_resp_send_err(req, HTTPD_500_INTERNAL_SERVER_ERROR, "Server busy");
return ESP_FAIL;
}
ssize_t read_bytes; ssize_t read_bytes;
do do
@@ -155,6 +203,7 @@ internal esp_err_t static_file_handler(httpd_req_t *req)
{ {
close(fd); close(fd);
ESP_LOGE(TAG, "File sending failed!"); ESP_LOGE(TAG, "File sending failed!");
free_scratch_buffer(chunk);
httpd_resp_sendstr_chunk(req, NULL); // Abort sending httpd_resp_sendstr_chunk(req, NULL); // Abort sending
return ESP_FAIL; return ESP_FAIL;
} }
@@ -162,6 +211,7 @@ internal esp_err_t static_file_handler(httpd_req_t *req)
} while (read_bytes > 0); } while (read_bytes > 0);
close(fd); close(fd);
free_scratch_buffer(chunk);
httpd_resp_send_chunk(req, NULL, 0); // End response httpd_resp_send_chunk(req, NULL, 0); // End response
return ESP_OK; return ESP_OK;
@@ -210,17 +260,12 @@ internal httpd_handle_t start_webserver(void)
ESP_LOGI(TAG, "LittleFS mounted on /www"); ESP_LOGI(TAG, "LittleFS mounted on /www");
} }
#endif #endif
http_server_data_t *rest_context =
(http_server_data_t *)calloc(1, sizeof(http_server_data_t));
if (rest_context == NULL)
{
ESP_LOGE(TAG, "No memory for rest context");
return NULL;
}
httpd_config_t config = HTTPD_DEFAULT_CONFIG(); httpd_config_t config = HTTPD_DEFAULT_CONFIG();
config.uri_match_fn = httpd_uri_match_wildcard; config.uri_match_fn = httpd_uri_match_wildcard;
config.max_uri_handlers = 20; config.max_uri_handlers = 20;
config.max_open_sockets = 24;
config.lru_purge_enable = true;
httpd_handle_t server = NULL; httpd_handle_t server = NULL;
ESP_LOGI(TAG, "Starting HTTP Server on port: '%d'", config.server_port); ESP_LOGI(TAG, "Starting HTTP Server on port: '%d'", config.server_port);
@@ -262,7 +307,7 @@ internal httpd_handle_t start_webserver(void)
httpd_uri_t static_get_uri = {.uri = "/*", httpd_uri_t static_get_uri = {.uri = "/*",
.method = HTTP_GET, .method = HTTP_GET,
.handler = static_file_handler, .handler = static_file_handler,
.user_ctx = rest_context}; .user_ctx = NULL};
httpd_register_uri_handler(server, &static_get_uri); httpd_register_uri_handler(server, &static_get_uri);
#endif #endif
@@ -270,7 +315,6 @@ internal httpd_handle_t start_webserver(void)
} }
ESP_LOGE(TAG, "Error starting server!"); ESP_LOGE(TAG, "Error starting server!");
free(rest_context);
return NULL; return NULL;
} }

View File

@@ -1,3 +1,4 @@
CONFIG_PARTITION_TABLE_CUSTOM=y CONFIG_PARTITION_TABLE_CUSTOM=y
CONFIG_PARTITION_TABLE_CUSTOM_FILENAME="partitions.csv" CONFIG_PARTITION_TABLE_CUSTOM_FILENAME="partitions.csv"
CONFIG_PARTITION_TABLE_FILENAME="partitions.csv" CONFIG_PARTITION_TABLE_FILENAME="partitions.csv"
CONFIG_LWIP_MAX_SOCKETS=32

View File

@@ -21,16 +21,16 @@ Furthermore, the current `static_file_handler` relies on a single shared `rest_c
### 3.1 Backend Configuration (ESP-IDF) ### 3.1 Backend Configuration (ESP-IDF)
Instead of implementing complex multi-threading (spawning multiple FreeRTOS worker tasks), we will leverage the HTTP server's built-in event loop multiplexing by tuning its configuration: Instead of implementing complex multi-threading (spawning multiple FreeRTOS worker tasks), we will leverage the HTTP server's built-in event loop multiplexing by tuning its configuration:
1. **Increase Socket Limit**: Set `config.max_open_sockets = 10` (or up to `LWIP_MAX_SOCKETS` limit) to provide more headroom for initial connections. 1. **Increase LwIP Socket Limit**: `LWIP_MAX_SOCKETS` is set to `32` in `sdkconfig.defaults`.
2. **Enable Stale Socket Purging**: Set `config.lru_purge_enable = true`. This is the critical fix. When the socket limit is reached and a new device attempts to connect, the server will intentionally drop the oldest idle keep-alive socket to make room, allowing the new device to load the page seamlessly. 2. **Increase HTTP Socket Limit**: Set `config.max_open_sockets = 24`. This deliberately reserves `8` sockets for LwIP internals and outwards connections, guaranteeing the network stack always has headroom to accept a TCP handshake from a new client.
3. **Enable Stale Socket Purging**: Set `config.lru_purge_enable = true`. This is the critical fix. When the 24 socket limit is reached and a new device attempts to connect, the server will intentionally drop the oldest idle keep-alive socket to make room, allowing the new device to load the page seamlessly.
### 3.2 Backend Scratch Buffer Pooling ### 3.2 Backend Scratch Buffer Pooling
To safely support multiplexed file serving without heavy `malloc`/`free` overhead on every request, we will replace the single shared scratch buffer with a **dynamically growing Shared Buffer Pool**: To safely support multiplexed file serving without heavy `malloc`/`free` overhead on every request, we will replace the single shared scratch buffer with a **Static Shared Buffer Pool**:
- We will allocate a global pool of scratch memory chunks. - We allocated a global struct with a fixed array of `MAX_SCRATCH_BUFFERS = 10`.
- When `static_file_handler` begins, it will request an available chunk from the pool. - When `static_file_handler` begins, it will request an available chunk from the pool, allocating a 4KB chunk on the heap only the first time it is used.
- If all chunks are currently in use by other concurrent requests, the pool will use `realloc` to expand its capacity and create a new chunk.
- When the handler finishes, the chunk is marked as available yielding it for the next request. - When the handler finishes, the chunk is marked as available yielding it for the next request.
- This provides isolation between concurrent connections while minimizing heap fragmentation compared to per-request `mallocs`. - This provides isolation between up to 10 active transmission connections while minimizing heap fragmentation compared to per-request `mallocs`.
### 3.3 Frontend Safety (Loading Spinner) ### 3.3 Frontend Safety (Loading Spinner)
Even with backend improvements, network latency or heavy load might cause delays. We will implement a global request tracker to improve perceived performance: Even with backend improvements, network latency or heavy load might cause delays. We will implement a global request tracker to improve perceived performance:
@@ -44,7 +44,7 @@ Even with backend improvements, network latency or heavy load might cause delays
|---|---|---|---| |---|---|---|---|
| **True Multi-Threading (Multiple Worker Tasks)** | Can process files fully in parallel on both cores. | High memory overhead for stack space per task; over-engineered for simple static file serving. | **Rejected**. Relying on the event loop's multiplexing is sufficient for local network use cases. | | **True Multi-Threading (Multiple Worker Tasks)** | Can process files fully in parallel on both cores. | High memory overhead for stack space per task; over-engineered for simple static file serving. | **Rejected**. Relying on the event loop's multiplexing is sufficient for local network use cases. |
| **Per-Request `malloc` / `free`** | Simplest way to isolate scratch buffers. | High heap fragmentation risk; computationally expensive on every HTTP request. | **Rejected**. | | **Per-Request `malloc` / `free`** | Simplest way to isolate scratch buffers. | High heap fragmentation risk; computationally expensive on every HTTP request. | **Rejected**. |
| **Dynamically Resizing Pool (`realloc`)** | Low overhead; memory footprint only grows organically to the maximum concurrent need and stabilizes. | Slightly more complex to implement the pool state management. | **Selected**. Best balance of performance and memory safety. | | **Fixed Pool (10 buffers)** | Low overhead; memory footprint only grows organically to the maximum concurrent need limit (10 * 4KB = 40KB) and stabilizes. | Strict limit on how many connections can be actively transmitting data at the exact same millisecond. | **Selected**. Best balance of performance and memory safety. |
## 5. Potential Future Improvements ## 5. Potential Future Improvements
- If the fixed pool proves too small during an unexpected spike, we could make `MAX_SCRATCH_BUFFERS` configurable, or add an idle-time cleanup routine that releases the lazily allocated buffers back to the heap when the server is quiet.