
Batch Data Upload

High-volume data transmission endpoint for uploading compressed batches of biomonitoring data.

POST /api/v1/data/batch

Use this endpoint to:

  • Upload large volumes of historical data
  • Transmit data collected during offline periods
  • Send raw sensor data for advanced analysis
  • Optimize bandwidth for high-resolution recordings

Request headers:

Content-Type: application/json
Authorization: Bearer {device_token}
Content-Encoding: gzip
Request body (shown uncompressed):

{
  "device_id": "BCGMCU_001",
  "batch_info": {
    "batch_id": "BATCH_001_20250127_103000",
    "start_timestamp": "2025-01-27T09:30:00.000Z",
    "end_timestamp": "2025-01-27T10:30:00.000Z",
    "sample_count": 3600,
    "data_checksum": "sha256:a1b2c3d4e5f6...",
    "compression": "gzip"
  },
  "raw_data": [
    {
      "timestamp": "2025-01-27T09:30:00.000Z",
      "accelerometer": {"x": 0.012, "y": -0.003, "z": 0.998},
      "processed": {"pulse": 72, "breathing": 16}
    },
    {
      "timestamp": "2025-01-27T09:30:01.000Z",
      "accelerometer": {"x": 0.015, "y": -0.001, "z": 0.995},
      "processed": {"pulse": 73, "breathing": 16}
    }
  ]
}
The batch_info object contains:

| Field           | Type    | Required | Description                          |
|-----------------|---------|----------|--------------------------------------|
| batch_id        | string  | Yes      | Unique batch identifier              |
| start_timestamp | string  | Yes      | First sample timestamp               |
| end_timestamp   | string  | Yes      | Last sample timestamp                |
| sample_count    | integer | Yes      | Total number of samples              |
| data_checksum   | string  | Yes      | SHA-256 checksum for validation      |
| compression     | string  | Yes      | Compression method ("gzip", "none")  |

Each sample contains:

| Field         | Type   | Required | Description                       |
|---------------|--------|----------|-----------------------------------|
| timestamp     | string | Yes      | Sample timestamp                  |
| accelerometer | object | No       | Raw 3-axis accelerometer data     |
| processed     | object | Yes      | Processed biomonitoring data      |
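
The firmware examples below build this JSON by hand, but the same sample structure can also be produced with ArduinoJson (already included in those examples). A minimal sketch, assuming a hypothetical buildSample() helper and an illustrative document capacity:

#include <ArduinoJson.h>

// Build one raw_data entry; field names follow the tables above,
// the helper name and the 256-byte capacity are illustrative.
String buildSample(const String& timestamp,
                   float ax, float ay, float az,
                   int pulse, int breathing) {
  StaticJsonDocument<256> doc;
  doc["timestamp"] = timestamp;

  JsonObject accel = doc.createNestedObject("accelerometer");  // optional block
  accel["x"] = ax;
  accel["y"] = ay;
  accel["z"] = az;

  JsonObject processed = doc.createNestedObject("processed");  // required block
  processed["pulse"] = pulse;
  processed["breathing"] = breathing;

  String out;
  serializeJson(doc, out);
  return out;
}

Because the accelerometer block is optional, it can simply be skipped for samples where only processed values are available.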
Response:

{
  "batch_status": "accepted",
  "batch_id": "BATCH_001_20250127_103000",
  "server_timestamp": "2025-01-27T10:30:16.000Z",
  "checksum_verified": true,
  "samples_processed": 3600,
  "storage_location": "s3://bcg-data/2025/01/27/BCGMCU_001/",
  "processing_job_id": "job_abc123"
}
Response fields:

| Field             | Type    | Description                               |
|-------------------|---------|-------------------------------------------|
| batch_status      | string  | "accepted", "processing", or "rejected"   |
| batch_id          | string  | Confirmed batch identifier                |
| server_timestamp  | string  | Server processing timestamp               |
| checksum_verified | boolean | Data integrity verification result        |
| samples_processed | integer | Number of samples successfully processed  |
| storage_location  | string  | Cloud storage location                    |
| processing_job_id | string  | Background processing job ID              |
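
On the device side, the response can be checked before the local batch file is deleted. A minimal parsing sketch with ArduinoJson; handleBatchResponse() and the treatment of "processing" as success are assumptions, not documented behaviour:

#include <ArduinoJson.h>

// Parse the upload response; this helper is illustrative only.
bool handleBatchResponse(const String& body) {
  StaticJsonDocument<512> doc;
  DeserializationError err = deserializeJson(doc, body);
  if (err) {
    return false;  // malformed response
  }

  const char* status = doc["batch_status"] | "rejected";
  bool checksumOk    = doc["checksum_verified"] | false;
  int processed      = doc["samples_processed"] | 0;

  Serial.printf("Batch %s: %d samples processed, checksum %s\n",
                status, processed, checksumOk ? "verified" : "FAILED");

  // Assumption: treat both "accepted" and "processing" as success
  return checksumOk &&
         (strcmp(status, "accepted") == 0 || strcmp(status, "processing") == 0);
}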
Upload limits:

| Parameter    | Limit            | Recommendation                  |
|--------------|------------------|---------------------------------|
| Max Samples  | 100,000          | 10,000 for optimal performance  |
| Max Payload  | 50 MB            | 10 MB compressed                |
| Max Duration | 24 hours         | 1 hour batches                  |
| Compression  | Required > 1 MB  | gzip recommended                |
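
One way to stay inside these limits on the device is to check payload size and sample count before sending. The constants and helper names below are suggestions derived from the table, not values returned by the API:

// Client-side guards derived from the limits table (illustrative values).
const size_t COMPRESS_THRESHOLD_BYTES = 1UL * 1024 * 1024;   // compression required above 1 MB
const size_t RECOMMENDED_MAX_BYTES    = 10UL * 1024 * 1024;  // recommended compressed size
const int    RECOMMENDED_MAX_SAMPLES  = 10000;               // recommended samples per batch

bool payloadNeedsCompression(size_t payloadBytes) {
  return payloadBytes > COMPRESS_THRESHOLD_BYTES;
}

bool payloadWithinRecommendedLimits(size_t compressedBytes, int samples) {
  return compressedBytes <= RECOMMENDED_MAX_BYTES &&
         samples <= RECOMMENDED_MAX_SAMPLES;
}

The full device-side implementation below writes samples to SPIFFS, finalizes each batch with a checksum, and uploads it with the gzip headers shown above.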
#include <HTTPClient.h>
#include <ArduinoJson.h>
#include <FS.h>
#include <SPIFFS.h>
#include <mbedtls/sha256.h>

// API_BASE_URL, deviceToken, getDeviceId(), getTimestamp() and
// getCurrentISO8601() are assumed to be defined elsewhere in the firmware;
// calculateSHA256() and compressGzip() are shown later on this page.
static const int MAX_BATCH_SIZE = 10000;  // matches the recommended per-batch sample count

class BatchUploader {
private:
  File batchFile;
  String currentBatchId;
  String startTimestamp;
  int sampleCount = 0;

public:
  void startBatch() {
    // Create unique batch ID and remember the start time
    currentBatchId = "BATCH_" + getDeviceId() + "_" + getTimestamp();
    startTimestamp = getCurrentISO8601();

    // Open file for writing
    String filename = "/batches/" + currentBatchId + ".json";
    batchFile = SPIFFS.open(filename, "w");

    // Write the document header and open the raw_data array;
    // batch_info is written once, in finalizeBatch()
    batchFile.println("{");
    batchFile.println("\"device_id\":\"" + getDeviceId() + "\",");
    batchFile.println("\"raw_data\":[");
    sampleCount = 0;
  }

  void addSample(BCGMCU::RawSample& sample) {
    if (!batchFile) return;

    if (sampleCount > 0) {
      batchFile.println(",");
    }

    // Write sample data
    batchFile.println("{");
    batchFile.println("\"timestamp\":\"" + sample.timestamp + "\",");

    // Raw accelerometer
    batchFile.println("\"accelerometer\":{");
    batchFile.println("\"x\":" + String(sample.accel_x, 6) + ",");
    batchFile.println("\"y\":" + String(sample.accel_y, 6) + ",");
    batchFile.println("\"z\":" + String(sample.accel_z, 6));
    batchFile.println("},");

    // Processed data
    batchFile.println("\"processed\":{");
    batchFile.println("\"pulse\":" + String(sample.pulse_rate) + ",");
    batchFile.println("\"breathing\":" + String(sample.respiratory_rate));
    batchFile.println("}");
    batchFile.print("}");

    sampleCount++;

    // Check if batch is full
    if (sampleCount >= MAX_BATCH_SIZE) {
      finalizeBatch();
    }
  }

  bool finalizeBatch() {
    if (!batchFile) return false;

    // Close the raw_data array and write the batch_info object
    batchFile.println("],");
    batchFile.println("\"batch_info\":{");
    batchFile.println("\"batch_id\":\"" + currentBatchId + "\",");
    batchFile.println("\"start_timestamp\":\"" + startTimestamp + "\",");
    batchFile.println("\"end_timestamp\":\"" + getCurrentISO8601() + "\",");
    batchFile.println("\"sample_count\":" + String(sampleCount));
    batchFile.println("}}");
    batchFile.close();

    // Calculate checksum and upload
    return uploadBatch();
  }

private:
  bool uploadBatch() {
    String filename = "/batches/" + currentBatchId + ".json";
    File file = SPIFFS.open(filename, "r");
    if (!file) return false;

    // Calculate checksum over the stored batch file
    String checksum = calculateSHA256(file);
    file.seek(0);

    // Read file content
    String batchData = file.readString();
    file.close();

    // Add checksum and compression method to batch_info,
    // then compress the final payload
    batchData.replace("\"sample_count\":" + String(sampleCount),
                      "\"sample_count\":" + String(sampleCount) +
                      ",\"data_checksum\":\"sha256:" + checksum + "\"" +
                      ",\"compression\":\"gzip\"");
    String compressedData = compressGzip(batchData);

    // Send upload request
    HTTPClient https;
    https.begin(API_BASE_URL + "/data/batch");
    https.addHeader("Content-Type", "application/json");
    https.addHeader("Content-Encoding", "gzip");
    https.addHeader("Authorization", "Bearer " + deviceToken);

    int httpCode = https.POST(compressedData);
    https.end();

    if (httpCode == 200) {
      // Delete local file after successful upload
      SPIFFS.remove(filename);
      return true;
    }
    return false;
  }
};
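
A minimal sketch of how the uploader might be driven for a one-hour recording at 1 Hz (matching the example payload); readSample() and the global uploader instance are assumptions:

BatchUploader uploader;  // assumed global instance

void recordOneHourBatch() {
  uploader.startBatch();

  for (int i = 0; i < 3600; i++) {            // 1 Hz for one hour, as in the example payload
    BCGMCU::RawSample sample = readSample();  // hypothetical sensor read
    uploader.addSample(sample);
    delay(1000);
  }

  uploader.finalizeBatch();                   // writes batch_info, uploads, deletes the file
}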
#include <zlib.h>

class DataCompressor {
public:
  // Compress a JSON payload into a gzip container (windowBits 15 + 16
  // selects the gzip wrapper). The returned String holds binary data,
  // so always use length()-aware APIs when sending it.
  String compressGzip(const String& data) {
    z_stream zs;
    memset(&zs, 0, sizeof(zs));

    if (deflateInit2(&zs, Z_DEFAULT_COMPRESSION, Z_DEFLATED,
                     15 + 16, 8, Z_DEFAULT_STRATEGY) != Z_OK) {
      return "";
    }

    zs.next_in = (Bytef*)data.c_str();
    zs.avail_in = data.length();

    String compressed;
    char outbuffer[32768];

    do {
      zs.next_out = (Bytef*)outbuffer;
      zs.avail_out = sizeof(outbuffer);

      int ret = deflate(&zs, Z_FINISH);
      if (ret == Z_STREAM_ERROR) {
        deflateEnd(&zs);
        return "";
      }
      if (compressed.length() < zs.total_out) {
        compressed.concat(outbuffer, zs.total_out - compressed.length());
      }
    } while (zs.avail_out == 0);

    deflateEnd(&zs);

    Serial.printf("Compression: %u -> %u bytes (%.1f%%)\n",
                  data.length(), compressed.length(),
                  100.0 * compressed.length() / data.length());
    return compressed;
  }
};
#include <vector>

class BatchQueue {
private:
  struct QueuedBatch {
    String batchId;
    String filename;
    unsigned long timestamp;
    int retryCount;
  };

  std::vector<QueuedBatch> queue;
  const int MAX_RETRIES = 3;

public:
  void addBatch(String batchId, String filename) {
    QueuedBatch batch;
    batch.batchId = batchId;
    batch.filename = filename;
    batch.timestamp = millis();
    batch.retryCount = 0;
    queue.push_back(batch);
  }

  void processQueue() {
    for (auto it = queue.begin(); it != queue.end();) {
      // uploadBatch(filename) is assumed to be an upload helper like the
      // one shown in BatchUploader above
      if (uploadBatch(it->filename)) {
        // Success - remove from queue
        SPIFFS.remove(it->filename);
        it = queue.erase(it);
      } else {
        // Failed - increment retry count
        it->retryCount++;
        if (it->retryCount >= MAX_RETRIES) {
          // Too many failures - remove and log error
          Serial.printf("Batch upload failed after %d retries: %s\n",
                        MAX_RETRIES, it->batchId.c_str());
          SPIFFS.remove(it->filename);
          it = queue.erase(it);
        } else {
          ++it;
        }
      }
    }
  }

  int getQueueSize() {
    return queue.size();
  }

  void clearOldBatches() {
    const unsigned long MAX_AGE_MS = 24UL * 60UL * 60UL * 1000UL;  // 24 hours
    for (auto it = queue.begin(); it != queue.end();) {
      // Compare ages by subtraction so the check stays correct even
      // when the device has been up for less than 24 hours
      if (millis() - it->timestamp > MAX_AGE_MS) {
        Serial.printf("Removing old batch: %s\n", it->batchId.c_str());
        SPIFFS.remove(it->filename);
        it = queue.erase(it);
      } else {
        ++it;
      }
    }
  }
};
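
A minimal sketch for servicing the queue periodically, only when connectivity is available; batchQueue and the one-minute retry interval are illustrative:

#include <WiFi.h>

BatchQueue batchQueue;  // assumed global instance

void serviceUploadQueue() {
  static unsigned long lastAttempt = 0;
  const unsigned long RETRY_INTERVAL_MS = 60000;  // try once a minute (illustrative)

  if (WiFi.status() == WL_CONNECTED &&
      millis() - lastAttempt >= RETRY_INTERVAL_MS) {
    batchQueue.clearOldBatches();  // drop anything older than 24 hours
    batchQueue.processQueue();     // retry pending uploads
    lastAttempt = millis();
  }
}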
String calculateSHA256(File& file) {
  mbedtls_sha256_context ctx;
  mbedtls_sha256_init(&ctx);
  mbedtls_sha256_starts(&ctx, 0);  // 0 for SHA-256

  const size_t bufferSize = 1024;
  uint8_t buffer[bufferSize];

  while (file.available()) {
    int bytesRead = file.read(buffer, bufferSize);
    mbedtls_sha256_update(&ctx, buffer, bytesRead);
  }

  uint8_t hash[32];
  mbedtls_sha256_finish(&ctx, hash);
  mbedtls_sha256_free(&ctx);

  // Convert to hex string
  String hashString;
  for (int i = 0; i < 32; i++) {
    if (hash[i] < 16) hashString += "0";
    hashString += String(hash[i], HEX);
  }
  return hashString;
}
// These triggers assume access to the BatchUploader shown above; its
// finalizeBatch()/startBatch() calls and sampleCount are written here as
// globals for brevity.

void handleTimeBatching() {
  static unsigned long lastBatch = 0;
  const unsigned long BATCH_INTERVAL = 3600000;  // 1 hour

  if (millis() - lastBatch >= BATCH_INTERVAL) {
    finalizeBatch();
    startBatch();
    lastBatch = millis();
  }
}

void handleSizeBatching() {
  const int MAX_BATCH_SAMPLES = 10000;

  if (sampleCount >= MAX_BATCH_SAMPLES) {
    finalizeBatch();
    startBatch();
  }
}

void handleMemoryBatching() {
  const int MIN_FREE_MEMORY = 50000;  // 50 KB

  if (ESP.getFreeHeap() < MIN_FREE_MEMORY) {
    finalizeBatch();
    startBatch();
  }
}
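
The three triggers can run side by side in the main loop; whichever condition is met first rotates the batch. collectSample() is a hypothetical sensor read, and serviceUploadQueue() is the queue helper sketched above:

void loop() {
  collectSample();         // hypothetical: reads the sensor and calls addSample()
  handleTimeBatching();    // rotate at least once per hour
  handleSizeBatching();    // rotate when the sample cap is reached
  handleMemoryBatching();  // rotate early if free heap runs low
  serviceUploadQueue();    // retry queued uploads when connectivity allows
}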
// sendBatchData() and splitAndUpload() are helpers assumed to exist
// elsewhere in the firmware; MAX_RETRIES matches the queue setting above.
bool uploadWithRetry(String& data) {
  int retries = 0;
  int delay_ms = 1000;

  while (retries < MAX_RETRIES) {
    int httpCode = sendBatchData(data);

    if (httpCode == 200) {
      return true;
    }
    if (httpCode == 413) {
      // Payload too large - split batch
      return splitAndUpload(data);
    }
    if (httpCode >= 500 || httpCode == 429) {
      // Server error or rate limit - back off and retry
      delay(delay_ms);
      delay_ms *= 2;  // Exponential backoff
      retries++;
    } else {
      return false;  // Don't retry client errors
    }
  }
  return false;
}
  1. Regular Batching: Upload batches every hour during normal operation
  2. Offline Storage: Queue batches locally during network outages
  3. Compression: Always compress batches larger than 1MB
  4. Checksums: Verify data integrity with SHA-256
  5. Cleanup: Remove successfully uploaded batches from storage
  6. Size Limits: Keep batches under 10MB for optimal performance
  7. Retry Logic: Implement exponential backoff for failed uploads
class StorageManager {
public:
  void manageStorage() {
    // Check available space
    size_t totalBytes = SPIFFS.totalBytes();
    size_t usedBytes = SPIFFS.usedBytes();
    float usage = 100.0 * usedBytes / totalBytes;

    Serial.printf("Storage: %.1f%% used (%u/%u bytes)\n",
                  usage, usedBytes, totalBytes);

    // Clean up if usage is high
    if (usage > 80.0) {
      cleanupOldBatches();
    }

    // Emergency cleanup
    if (usage > 95.0) {
      emergencyCleanup();
    }
  }

private:
  void cleanupOldBatches() {
    // Remove batches older than 24 hours
    File root = SPIFFS.open("/batches");
    File file = root.openNextFile();

    while (file) {
      time_t fileTime = file.getLastWrite();
      time_t now = time(nullptr);

      if (now - fileTime > 24 * 3600) {
        Serial.printf("Removing old batch: %s\n", file.name());
        SPIFFS.remove(file.name());
      }
      file = root.openNextFile();
    }
  }

  void emergencyCleanup();  // sketched below
};
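
emergencyCleanup() is declared above but not shown. A minimal sketch, assuming the oldest batch files can simply be dropped until usage falls; the 50% target is an assumption, not a documented requirement:

void StorageManager::emergencyCleanup() {
  File root = SPIFFS.open("/batches");
  File file = root.openNextFile();

  // Delete batch files until usage drops below an (assumed) 50% target
  while (file && (100.0 * SPIFFS.usedBytes() / SPIFFS.totalBytes()) > 50.0) {
    String path = file.name();
    file = root.openNextFile();  // advance before removing the current file
    Serial.printf("Emergency cleanup, removing: %s\n", path.c_str());
    SPIFFS.remove(path);
  }
}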