diff --git a/Provider/frontend/package.json b/Provider/frontend/package.json
index 2741ed2..685c042 100644
--- a/Provider/frontend/package.json
+++ b/Provider/frontend/package.json
@@ -21,4 +21,4 @@
"vite": "^7.3.1",
"vite-plugin-singlefile": "^2.3.0"
}
-}
\ No newline at end of file
+}
diff --git a/Provider/frontend/src/App.svelte b/Provider/frontend/src/App.svelte
index b8f38e3..e287e02 100644
--- a/Provider/frontend/src/App.svelte
+++ b/Provider/frontend/src/App.svelte
@@ -4,6 +4,7 @@
import Sidebar from "./lib/Sidebar.svelte";
import TaskManager from "./lib/TaskManager.svelte";
import UserManager from "./lib/UserManager.svelte";
+ import Spinner from "./lib/Spinner.svelte";
/** @type {'loading' | 'ok' | 'error' | 'rebooting'} */
let status = $state("loading");
@@ -389,6 +390,8 @@
+
+
diff --git a/Provider/frontend/src/lib/api.js b/Provider/frontend/src/lib/api.js
index 8d11e30..46a0b0b 100644
--- a/Provider/frontend/src/lib/api.js
+++ b/Provider/frontend/src/lib/api.js
@@ -7,13 +7,26 @@
*/
const API_BASE = import.meta.env.VITE_API_BASE || '';
+import { pendingRequests } from './stores.js';
+
+/**
+ * Wrapper around fetch that tracks the number of pending requests globally
+ */
+async function trackedFetch(url, options = {}) {
+ pendingRequests.update(n => n + 1);
+ try {
+ return await fetch(url, options);
+ } finally {
+ pendingRequests.update(n => Math.max(0, n - 1));
+ }
+}
/**
* Fetch system information from the ESP32.
* @returns {Promise<{chip: string, freeHeap: number, uptime: number, firmware: string, connection: string}>}
*/
export async function getSystemInfo() {
- const res = await fetch(`${API_BASE}/api/system/info`);
+ const res = await trackedFetch(`${API_BASE}/api/system/info`);
if (!res.ok) {
throw new Error(`HTTP ${res.status}: ${res.statusText}`);
}
@@ -29,7 +42,7 @@ export async function getSystemInfo() {
* @returns {Promise<{message: string}>}
*/
export async function reboot() {
- const res = await fetch(`${API_BASE}/api/system/reboot`, {
+ const res = await trackedFetch(`${API_BASE}/api/system/reboot`, {
method: 'POST',
});
if (!res.ok) {
@@ -43,7 +56,7 @@ export async function reboot() {
* @returns {Promise<{active_slot: number, active_partition: string, target_partition: string, partitions: any[], running_firmware_label: string, running_firmware_slot: number}>}
*/
export async function getOTAStatus() {
- const res = await fetch(`${API_BASE}/api/ota/status`);
+ const res = await trackedFetch(`${API_BASE}/api/ota/status`);
if (!res.ok) {
throw new Error(`HTTP ${res.status}: ${res.statusText}`);
}
@@ -56,7 +69,7 @@ export async function getOTAStatus() {
* @returns {Promise<{status: string, message: string}>}
*/
export async function uploadOTAFrontend(file) {
- const res = await fetch(`${API_BASE}/api/ota/frontend`, {
+ const res = await trackedFetch(`${API_BASE}/api/ota/frontend`, {
method: 'POST',
body: file, // Send the raw file Blob/Buffer
headers: {
@@ -79,7 +92,7 @@ export async function uploadOTAFrontend(file) {
* @returns {Promise<{status: string, message: string}>}
*/
export async function uploadOTAFirmware(file) {
- const res = await fetch(`${API_BASE}/api/ota/firmware`, {
+ const res = await trackedFetch(`${API_BASE}/api/ota/firmware`, {
method: 'POST',
body: file,
headers: {
@@ -101,7 +114,7 @@ export async function uploadOTAFirmware(file) {
* @returns {Promise<{status: string, message: string}>}
*/
export async function uploadOTABundle(file) {
- const res = await fetch(`${API_BASE}/api/ota/bundle`, {
+ const res = await trackedFetch(`${API_BASE}/api/ota/bundle`, {
method: 'POST',
body: file,
headers: {
@@ -124,7 +137,7 @@ export async function uploadOTABundle(file) {
* @returns {Promise>}
*/
export async function getUsers() {
- const res = await fetch(`${API_BASE}/api/users`);
+ const res = await trackedFetch(`${API_BASE}/api/users`);
if (!res.ok) throw new Error(`HTTP ${res.status}: ${res.statusText}`);
return res.json();
}
@@ -135,7 +148,7 @@ export async function getUsers() {
* @returns {Promise<{id: number, name: string}>}
*/
export async function addUser(name) {
- const res = await fetch(`${API_BASE}/api/users`, {
+ const res = await trackedFetch(`${API_BASE}/api/users`, {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({ name })
@@ -153,7 +166,7 @@ export async function addUser(name) {
* @returns {Promise<{status: string}>}
*/
export async function removeUser(id) {
- const res = await fetch(`${API_BASE}/api/users?id=${id}`, {
+ const res = await trackedFetch(`${API_BASE}/api/users?id=${id}`, {
method: 'DELETE'
});
if (!res.ok) {
@@ -170,7 +183,7 @@ export async function removeUser(id) {
* @returns {Promise<{status: string}>}
*/
export async function updateUser(id, name) {
- const res = await fetch(`${API_BASE}/api/users/update`, {
+ const res = await trackedFetch(`${API_BASE}/api/users/update`, {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({ id, name })
@@ -190,7 +203,7 @@ export async function updateUser(id, name) {
* @returns {Promise>}
*/
export async function getTasks(userId) {
- const res = await fetch(`${API_BASE}/api/tasks?user_id=${userId}`);
+ const res = await trackedFetch(`${API_BASE}/api/tasks?user_id=${userId}`);
if (!res.ok) throw new Error(`HTTP ${res.status}: ${res.statusText}`);
return res.json();
}
@@ -200,7 +213,7 @@ export async function getTasks(userId) {
* @returns {Promise<{users: Array<{id: number, name: string, tasks: Array}>}>}
*/
export async function getUpcomingTasks() {
- const res = await fetch(`${API_BASE}/api/tasks/upcoming`);
+ const res = await trackedFetch(`${API_BASE}/api/tasks/upcoming`);
if (!res.ok) throw new Error(`HTTP ${res.status}: ${res.statusText}`);
return res.json();
}
@@ -213,7 +226,7 @@ export async function getUpcomingTasks() {
* @returns {Promise<{id: number, user_id: number, title: string, due_date: number, completed: boolean}>}
*/
export async function addTask(userId, title, dueDate) {
- const res = await fetch(`${API_BASE}/api/tasks`, {
+ const res = await trackedFetch(`${API_BASE}/api/tasks`, {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({ user_id: userId, title, due_date: dueDate })
@@ -232,7 +245,7 @@ export async function addTask(userId, title, dueDate) {
* @returns {Promise<{status: string}>}
*/
export async function updateTask(id, fields) {
- const res = await fetch(`${API_BASE}/api/tasks/update`, {
+ const res = await trackedFetch(`${API_BASE}/api/tasks/update`, {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({ id, ...fields })
@@ -250,7 +263,7 @@ export async function updateTask(id, fields) {
* @returns {Promise<{status: string}>}
*/
export async function deleteTask(id) {
- const res = await fetch(`${API_BASE}/api/tasks?id=${id}`, {
+ const res = await trackedFetch(`${API_BASE}/api/tasks?id=${id}`, {
method: 'DELETE'
});
if (!res.ok) {
diff --git a/Provider/frontend/src/lib/stores.js b/Provider/frontend/src/lib/stores.js
new file mode 100644
index 0000000..f98369b
--- /dev/null
+++ b/Provider/frontend/src/lib/stores.js
@@ -0,0 +1,3 @@
+import { writable } from 'svelte/store';
+
+export const pendingRequests = writable(0);
diff --git a/Provider/main/http_server.cpp b/Provider/main/http_server.cpp
index cca72c7..89ba776 100644
--- a/Provider/main/http_server.cpp
+++ b/Provider/main/http_server.cpp
@@ -28,11 +28,53 @@ constexpr uint8 kGZ_Extension_Length = sizeof(".gz") - 1;
#define FILE_PATH_MAX (ESP_VFS_PATH_MAX + 128)
#define SCRATCH_BUFSIZE 4096
+#define MAX_SCRATCH_BUFFERS 10
typedef struct
{
- char scratch[SCRATCH_BUFSIZE];
-} http_server_data_t;
+ char *buffers[MAX_SCRATCH_BUFFERS];
+ bool in_use[MAX_SCRATCH_BUFFERS];
+} scratch_pool_t;
+
+static scratch_pool_t global_scratch_pool = {};
+
+char *get_scratch_buffer()
+{
+ for (int i = 0; i < MAX_SCRATCH_BUFFERS; i++)
+ {
+ if (!global_scratch_pool.in_use[i])
+ {
+ if (global_scratch_pool.buffers[i] == NULL)
+ {
+ global_scratch_pool.buffers[i] = (char *)malloc(SCRATCH_BUFSIZE);
+ if (global_scratch_pool.buffers[i] == NULL)
+ {
+ ESP_LOGE(TAG, "Failed to allocate scratch buffer from heap!");
+ return NULL;
+ }
+ }
+ global_scratch_pool.in_use[i] = true;
+ return global_scratch_pool.buffers[i];
+ }
+ }
+ ESP_LOGE(TAG, "All scratch buffers in use! Increase MAX_SCRATCH_BUFFERS");
+ return NULL;
+}
+
+void free_scratch_buffer(char *buffer)
+{
+ if (buffer == NULL)
+ return;
+ for (int i = 0; i < MAX_SCRATCH_BUFFERS; i++)
+ {
+ if (global_scratch_pool.buffers[i] == buffer)
+ {
+ global_scratch_pool.in_use[i] = false;
+ return;
+ }
+ }
+ ESP_LOGE(TAG, "Attempted to free unknown scratch buffer!");
+}
#ifdef CONFIG_CALENDINK_DEPLOY_WEB_PAGES
// Set HTTP response content type according to file extension
@@ -138,8 +180,14 @@ internal esp_err_t static_file_handler(httpd_req_t *req)
set_content_type_from_file(req, filepath);
- http_server_data_t *rest_context = (http_server_data_t *)req->user_ctx;
- char *chunk = rest_context->scratch;
+ char *chunk = get_scratch_buffer();
+ if (chunk == NULL)
+ {
+ close(fd);
+ httpd_resp_send_err(req, HTTPD_500_INTERNAL_SERVER_ERROR, "Server busy");
+ return ESP_FAIL;
+ }
+
ssize_t read_bytes;
do
@@ -155,6 +203,7 @@ internal esp_err_t static_file_handler(httpd_req_t *req)
{
close(fd);
ESP_LOGE(TAG, "File sending failed!");
+ free_scratch_buffer(chunk);
httpd_resp_sendstr_chunk(req, NULL); // Abort sending
return ESP_FAIL;
}
@@ -162,6 +211,7 @@ internal esp_err_t static_file_handler(httpd_req_t *req)
} while (read_bytes > 0);
close(fd);
+ free_scratch_buffer(chunk);
httpd_resp_send_chunk(req, NULL, 0); // End response
return ESP_OK;
@@ -210,17 +260,12 @@ internal httpd_handle_t start_webserver(void)
ESP_LOGI(TAG, "LittleFS mounted on /www");
}
#endif
- http_server_data_t *rest_context =
- (http_server_data_t *)calloc(1, sizeof(http_server_data_t));
- if (rest_context == NULL)
- {
- ESP_LOGE(TAG, "No memory for rest context");
- return NULL;
- }
httpd_config_t config = HTTPD_DEFAULT_CONFIG();
config.uri_match_fn = httpd_uri_match_wildcard;
config.max_uri_handlers = 20;
+ config.max_open_sockets = 24;
+ config.lru_purge_enable = true;
httpd_handle_t server = NULL;
ESP_LOGI(TAG, "Starting HTTP Server on port: '%d'", config.server_port);
@@ -262,7 +307,7 @@ internal httpd_handle_t start_webserver(void)
httpd_uri_t static_get_uri = {.uri = "/*",
.method = HTTP_GET,
.handler = static_file_handler,
- .user_ctx = rest_context};
+ .user_ctx = NULL};
httpd_register_uri_handler(server, &static_get_uri);
#endif
@@ -270,7 +315,6 @@ internal httpd_handle_t start_webserver(void)
}
ESP_LOGE(TAG, "Error starting server!");
- free(rest_context);
return NULL;
}
diff --git a/Provider/sdkconfig.defaults b/Provider/sdkconfig.defaults
index 8ff365b..38f92f5 100644
--- a/Provider/sdkconfig.defaults
+++ b/Provider/sdkconfig.defaults
@@ -1,3 +1,4 @@
CONFIG_PARTITION_TABLE_CUSTOM=y
CONFIG_PARTITION_TABLE_CUSTOM_FILENAME="partitions.csv"
CONFIG_PARTITION_TABLE_FILENAME="partitions.csv"
+CONFIG_LWIP_MAX_SOCKETS=32
diff --git a/Provider/tdd/concurrent_requests.md b/Provider/tdd/concurrent_requests.md
new file mode 100644
index 0000000..7749fb5
--- /dev/null
+++ b/Provider/tdd/concurrent_requests.md
@@ -0,0 +1,51 @@
+# Concurrent Requests Support for ESP32-S3 Provider
+
+**Authored by Antigravity**
+**Date:** 2026-03-08
+
+---
+
+## 1. Goal (What)
+
+Enable the ESP32-S3 HTTP server to gracefully handle multiple concurrent web clients. Currently, if one browser connects, it consumes all available server sockets with "keep-alive" connections and blocks the `static_file_handler` via a single shared scratch buffer. The goal is to allow up to 5-10 clients (e.g., PCs, tablets, phones) on the local network to open the dashboard simultaneously without hanging, and to add a frontend safeguard (a loading spinner) to improve the user experience during slow network responses.
+
+## 2. Rationale (Why)
+
+The default ESP-IDF HTTP server (`esp_http_server`) is configured for minimal resource usage:
+- `max_open_sockets = 7`: A single modern browser tries to open up to 6 connections simultaneously to fetch HTML, CSS, JS, and API data.
+- `lru_purge_enable = false`: When a browser finishes loading, it keeps those 6 sockets open (Keep-Alive) for future requests. If a second device tries to connect, it is rejected because the server has no free sockets, even though the first device's sockets are idle.
+
+Furthermore, the current `static_file_handler` relies on a single shared `rest_context->scratch` buffer allocated globally. If the server is modified to multiplex handlers concurrently, this shared buffer would be overwritten by competing requests, causing data corruption in the served files.
+
+## 3. Chosen Approach (How)
+
+### 3.1 Backend Configuration (ESP-IDF)
+Instead of implementing complex multi-threading (spawning multiple FreeRTOS worker tasks), we will leverage the HTTP server's built-in event loop multiplexing by tuning its configuration:
+1. **Increase LwIP Socket Limit**: `LWIP_MAX_SOCKETS` is set to `32` in `sdkconfig.defaults`.
+2. **Increase HTTP Socket Limit**: Set `config.max_open_sockets = 24`. This deliberately reserves `8` of the 32 LwIP sockets for LwIP internals and outbound connections, guaranteeing the network stack always has headroom to accept a TCP handshake from a new client.
+3. **Enable Stale Socket Purging**: Set `config.lru_purge_enable = true`. This is the critical fix. When the 24 socket limit is reached and a new device attempts to connect, the server will intentionally drop the oldest idle keep-alive socket to make room, allowing the new device to load the page seamlessly.
+
+### 3.2 Backend Scratch Buffer Pooling
+To safely support multiplexed file serving without heavy `malloc`/`free` overhead on every request, we will replace the single shared scratch buffer with a **Static Shared Buffer Pool**:
+- We allocate a global struct holding a fixed array of `MAX_SCRATCH_BUFFERS = 10` buffer slots.
+- When `static_file_handler` begins, it will request an available chunk from the pool, allocating a 4KB chunk on the heap only the first time it is used.
+- When the handler finishes, the chunk is marked as available again, freeing it for the next request.
+- This provides isolation between up to 10 active transmission connections while minimizing heap fragmentation compared to per-request `mallocs`.
+
+### 3.3 Frontend Safety (Loading Spinner)
+Even with backend improvements, network latency or heavy load might cause delays. We will implement a global request tracker to improve perceived performance:
+- A new Svelte writable store `pendingRequests` will track the count of active API calls.
+- `api.js` will wrap the native `fetch` in a `trackedFetch` function that increments/decrements this store.
+- A new `Spinner` component (`src/lib/Spinner.svelte`) will be rendered at the root (`App.svelte`). It will overlay the screen when `$pendingRequests > 0`, optionally with a small delay (e.g., 300ms) to prevent flashing on fast requests.
+
+## 4. Design Decisions & Trade-offs
+
+| Approach | Pros | Cons | Decision |
+|---|---|---|---|
+| **True Multi-Threading (Multiple Worker Tasks)** | Can process files fully in parallel on both cores. | High memory overhead for stack space per task; over-engineered for simple static file serving. | **Rejected**. Relying on the event loop's multiplexing is sufficient for local network use cases. |
+| **Per-Request `malloc` / `free`** | Simplest way to isolate scratch buffers. | High heap fragmentation risk; computationally expensive on every HTTP request. | **Rejected**. |
| **Fixed Pool (10 buffers)** | Low overhead; memory footprint grows lazily up to the maximum concurrent need (10 * 4KB = 40KB) and then stabilizes. | Strict limit on how many connections can be actively transmitting data at the exact same millisecond. | **Selected**. Best balance of performance and memory safety. |
+
+## 5. Potential Future Improvements
+- If the scratch buffer pool grows too large during an unexpected spike, we could implement a cleanup routine that periodically shrinks the pool back to a baseline size (freeing unused slots) when the server is idle.
+- If true parallel processing is needed later, the HTTP server's `config.core_id` and `async_workers` could be utilized, but this requires ensuring all API handlers are perfectly thread-safe.