06 - Edge Deployment

Edge computing setup for local inference and offline operation


Table of Contents

  1. Edge Architecture
  2. Hardware Setup
  3. Docker Deployment
  4. ML Model Optimization
  5. Offline Operation
  6. Synchronization

1. Edge Architecture

Edge vs Cloud Processing

┌─────────────────────────────────────────────────────────────────┐
│                        EDGE-CLOUD HYBRID                        │
│                                                                 │
│  ┌───────────────────────────────────────────────────────────┐ │
│  │                           CLOUD                           │ │
│  │  • Full ML models             • Historical analytics      │ │
│  │  • Long-term storage          • Dashboard hosting         │ │
│  │  • Blockchain verification    • Report generation         │ │
│  └───────────────────────────────────────────────────────────┘ │
│                                ▲                                │
│                                │ Sync (when online)             │
│                                ▼                                │
│  ┌───────────────────────────────────────────────────────────┐ │
│  │                           EDGE                            │ │
│  │  • Lightweight ML (TFLite)    • Real-time alerts          │ │
│  │  • Local buffering            • Protocol conversion       │ │
│  │  • Basic predictions          • Sensor aggregation        │ │
│  └───────────────────────────────────────────────────────────┘ │
│                                ▲                                │
│                                │ Direct connection              │
│                                ▼                                │
│  ┌───────────────────────────────────────────────────────────┐ │
│  │                          SENSORS                          │ │
│  └───────────────────────────────────────────────────────────┘ │
└─────────────────────────────────────────────────────────────────┘

When to Use Edge

Scenario                      Edge   Cloud
Real-time alerts (< 100ms)    ✅     ❌
Complex ML predictions        ❌     ✅
Offline operation             ✅     ❌
Historical analytics          ❌     ✅
Data aggregation              ✅     ❌
Blockchain storage            ❌     ✅
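
This split is exactly what the edge gateway has to implement in code: latency-critical checks run locally on every reading, while everything else is buffered for the cloud. A minimal routing sketch, using the buffer and alert helpers defined in Sections 5.1 and 5.2 below:

// Sketch of the routing rule implied by the table above: evaluate
// alerts locally (the sub-100ms path), buffer everything else for
// cloud-side ML, analytics, and blockchain storage.
import { bufferReading } from "./edge/local-buffer";
import type { SensorReading } from "./edge/local-buffer";
import { checkLocalAlerts } from "./edge/offline-alerts";

export async function handleReading(reading: SensorReading) {
  const alerts = checkLocalAlerts(reading); // edge: real-time alerts
  await bufferReading(reading);             // cloud (deferred): synced when online
  return alerts;
}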

2. Hardware Setup

2.1 Siemens IOT2050 Setup

# Connect via SSH
ssh root@192.168.1.100

# Update system
apt update && apt upgrade -y

# Install Docker
curl -fsSL https://get.docker.com | sh
usermod -aG docker $USER

# Install Node.js
curl -fsSL https://deb.nodesource.com/setup_18.x | bash -
apt install -y nodejs

# Install Python for ML
apt install -y python3 python3-pip
pip3 install tflite-runtime numpy  # the pip package is tflite-runtime, not "tensorflow-lite"

2.2 Raspberry Pi Industrial Setup

# Flash Raspberry Pi OS Lite (64-bit)
# Enable SSH and configure WiFi

# Install dependencies
sudo apt update
sudo apt install -y docker.io python3-pip nodejs npm

# Enable hardware interfaces
sudo raspi-config
# Enable: I2C, SPI, Serial

# Install industrial HAT drivers (if applicable)
pip3 install adafruit-circuitpython-ads1x15 # For ADC
pip3 install pymodbus # For Modbus

2.3 Network Configuration

# /etc/netplan/01-network.yaml (Ubuntu-based)
network:
  version: 2
  ethernets:
    eth0:
      addresses:
        - 192.168.1.100/24
      gateway4: 192.168.1.1
      nameservers:
        addresses: [8.8.8.8, 8.8.4.4]
    eth1:
      addresses:
        - 10.0.0.1/24 # Sensor network (isolated)
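
After editing, validate the file with sudo netplan try (which rolls back if you lose connectivity), then persist it with sudo netplan apply.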

3. Docker Deployment

3.1 Edge Docker Compose

File: docker-compose.edge.yml

version: "3.8"

services:
  # MQTT Broker (local)
  mqtt:
    image: eclipse-mosquitto:2
    ports:
      - "1883:1883"
    volumes:
      - ./mosquitto/config:/mosquitto/config
      - mqtt-data:/mosquitto/data
    restart: unless-stopped

  # Local data buffer (Redis)
  edge-db:
    image: redis:7-alpine
    ports:
      - "6379:6379"
    volumes:
      - redis-data:/data
    restart: unless-stopped

  # Edge Gateway Service
  edge-gateway:
    build:
      context: .
      dockerfile: Dockerfile.edge
    ports:
      - "3000:3000"
    environment:
      - MQTT_BROKER=mqtt://mqtt:1883
      - REDIS_URL=redis://edge-db:6379
      - CLOUD_API_URL=${CLOUD_API_URL}
      - SYNC_INTERVAL=60000
    depends_on:
      - mqtt
      - edge-db
    restart: unless-stopped

  # ML Inference Service
  ml-inference:
    build:
      context: ./ml
      dockerfile: Dockerfile.tflite
    ports:
      - "8000:8000"
    volumes:
      - ./models:/app/models
    restart: unless-stopped

  # Node-RED (optional, for visual programming)
  node-red:
    image: nodered/node-red:3.1
    ports:
      - "1880:1880"
    volumes:
      - nodered-data:/data
    restart: unless-stopped

volumes:
  mqtt-data:
  redis-data:
  nodered-data:
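
The mqtt service mounts ./mosquitto/config, which needs a mosquitto.conf; Mosquitto 2.x only accepts local connections until a listener is configured explicitly. A minimal sketch, assuming anonymous access is acceptable on the isolated sensor network:

# mosquitto/config/mosquitto.conf (minimal sketch - add authentication in production)
listener 1883
allow_anonymous true
persistence true
persistence_location /mosquitto/data/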

3.2 Edge Gateway Dockerfile

File: Dockerfile.edge

FROM node:18-alpine

WORKDIR /app

# Install all dependencies (dev dependencies are needed for the TypeScript build)
COPY package*.json ./
RUN npm ci

# Copy application
COPY src/ ./src/
COPY tsconfig.json ./

# Build, then drop dev dependencies to keep the image small
RUN npm run build && npm prune --omit=dev

# Health check (alpine ships busybox wget; curl is not installed by default)
HEALTHCHECK --interval=30s --timeout=10s \
  CMD wget -qO- http://localhost:3000/health || exit 1

EXPOSE 3000

CMD ["node", "dist/edge-gateway.js"]

3.3 TensorFlow Lite Inference Dockerfile

File: ml/Dockerfile.tflite

FROM python:3.10-slim

WORKDIR /app

# Install TFLite runtime
RUN pip install --no-cache-dir \
    tflite-runtime \
    numpy \
    fastapi \
    uvicorn

COPY inference_server.py ./
COPY models/ ./models/

EXPOSE 8000

CMD ["uvicorn", "inference_server:app", "--host", "0.0.0.0", "--port", "8000"]

4. ML Model Optimization

4.1 Convert to TensorFlow Lite

# convert_to_tflite.py
import tensorflow as tf

# Load your trained model
model = tf.keras.models.load_model('water_quality_model.h5')

# Convert to TFLite
converter = tf.lite.TFLiteConverter.from_keras_model(model)

# Optimization options
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.target_spec.supported_types = [tf.float16] # Float16 quantization

# Convert
tflite_model = converter.convert()

# Save
with open('water_quality_model.tflite', 'wb') as f:
    f.write(tflite_model)

print(f"Model size: {len(tflite_model) / 1024:.2f} KB")

4.2 Edge Inference Server

File: ml/inference_server.py

from fastapi import FastAPI
from pydantic import BaseModel
import numpy as np
import time
import tflite_runtime.interpreter as tflite

app = FastAPI(title="Edge ML Inference")

# Load TFLite model
interpreter = tflite.Interpreter(model_path="models/water_quality_model.tflite")
interpreter.allocate_tensors()

input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()

class PredictionInput(BaseModel):
    pH: float
    BOD: float
    COD: float
    TSS: float
    TDS: float
    flow_rate: float
    temperature: float

class PredictionOutput(BaseModel):
    prediction: str
    confidence: float
    inference_time_ms: float

@app.post("/predict", response_model=PredictionOutput)
async def predict(data: PredictionInput):
    start = time.time()

    # Prepare input
    input_data = np.array([[
        data.pH, data.BOD, data.COD, data.TSS,
        data.TDS, data.flow_rate, data.temperature
    ]], dtype=np.float32)

    # Run inference
    interpreter.set_tensor(input_details[0]['index'], input_data)
    interpreter.invoke()
    output = interpreter.get_tensor(output_details[0]['index'])

    # Process output
    class_idx = int(np.argmax(output[0]))
    confidence = float(output[0][class_idx])

    classes = ['Class_A', 'Class_B', 'Class_C', 'Class_D', 'Class_E']
    prediction = classes[class_idx]

    inference_time = (time.time() - start) * 1000

    return PredictionOutput(
        prediction=prediction,
        confidence=confidence,
        inference_time_ms=inference_time,
    )

@app.get("/health")
async def health():
    return {"status": "healthy", "model": "water_quality_model.tflite"}

4.3 Model Size Comparison

Model Format        Size    Inference Time   Accuracy (relative)
Full TensorFlow     15 MB   50 ms            100%
TFLite (Float32)    4 MB    15 ms            99.8%
TFLite (Float16)    2 MB    12 ms            99.5%
TFLite (Int8)       1 MB    8 ms             98.5%
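
Note that the Int8 variant requires full integer quantization, which needs a representative dataset of real inputs at conversion time in addition to the converter settings shown in Section 4.1.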

5. Offline Operation

5.1 Local Data Buffer

// src/edge/local-buffer.ts
import Redis from "ioredis";

// Minimal reading shape assumed here; align with the project's shared types
export interface SensorReading {
  parameter: string;
  value: number;
  timestamp: number;
  [key: string]: unknown;
}

const redis = new Redis(process.env.REDIS_URL ?? "redis://localhost:6379");

export async function bufferReading(reading: SensorReading) {
  const key = `readings:${Date.now()}`;
  await redis.setex(key, 86400, JSON.stringify(reading)); // 24h TTL
  await redis.lpush("readings:queue", key);
}

// Returns keys together with readings so the sync service can clear
// exactly the entries it has uploaded
export async function getBufferedReadings(limit = 1000) {
  const keys = await redis.lrange("readings:queue", 0, limit - 1);
  const entries = await Promise.all(
    keys.map(async (key) => {
      const data = await redis.get(key);
      return data ? { key, reading: JSON.parse(data) as SensorReading } : null;
    })
  );
  return entries.filter(
    (e): e is { key: string; reading: SensorReading } => e !== null
  );
}

export async function clearSyncedReadings(keys: string[]) {
  if (keys.length === 0) return;
  // LREM removes one value per call, so batch the removals in a pipeline
  const pipeline = redis.pipeline();
  pipeline.del(...keys);
  for (const key of keys) pipeline.lrem("readings:queue", 0, key);
  await pipeline.exec();
}
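
Readings typically arrive over the local broker. A sketch of the MQTT-to-buffer wiring using the mqtt npm package (the topic pattern sensors/+/reading is an assumption; adjust to the actual sensor topics):

// src/edge/mqtt-ingest.ts — sketch: subscribe locally and buffer every reading
import mqtt from "mqtt";
import { bufferReading } from "./local-buffer";
import type { SensorReading } from "./local-buffer";

const client = mqtt.connect(process.env.MQTT_BROKER ?? "mqtt://localhost:1883");

client.on("connect", () => {
  client.subscribe("sensors/+/reading"); // hypothetical topic layout
});

client.on("message", async (_topic, payload) => {
  try {
    const reading = JSON.parse(payload.toString()) as SensorReading;
    await bufferReading(reading);
  } catch (err) {
    console.error("Malformed reading dropped:", err);
  }
});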

5.2 Offline Alert System

// src/edge/offline-alerts.ts
import type { SensorReading } from "./local-buffer";

export interface Alert {
  level: "warning" | "critical";
  message: string;
  reading: SensorReading;
}

interface Threshold {
  min?: number;
  max?: number;
  critical_min?: number;
  critical_max?: number;
}

const THRESHOLDS: Record<string, Threshold> = {
  pH: { min: 6.5, max: 8.5, critical_min: 5.0, critical_max: 10.0 },
  BOD: { max: 30, critical_max: 100 },
  COD: { max: 250, critical_max: 500 },
  TSS: { max: 100, critical_max: 300 },
};

export function checkLocalAlerts(reading: SensorReading): Alert[] {
  const alerts: Alert[] = [];
  const threshold = THRESHOLDS[reading.parameter];

  if (!threshold) return alerts;

  // Warning level
  if (threshold.min !== undefined && reading.value < threshold.min) {
    alerts.push({
      level: "warning",
      message: `${reading.parameter} below minimum (${reading.value} < ${threshold.min})`,
      reading,
    });
  }

  if (threshold.max !== undefined && reading.value > threshold.max) {
    alerts.push({
      level: "warning",
      message: `${reading.parameter} above maximum (${reading.value} > ${threshold.max})`,
      reading,
    });
  }

  // Critical level
  if (threshold.critical_min !== undefined && reading.value < threshold.critical_min) {
    alerts.push({
      level: "critical",
      message: `CRITICAL: ${reading.parameter} critically low!`,
      reading,
    });
    triggerLocalAlarm(reading);
  }

  if (threshold.critical_max !== undefined && reading.value > threshold.critical_max) {
    alerts.push({
      level: "critical",
      message: `CRITICAL: ${reading.parameter} critically high!`,
      reading,
    });
    triggerLocalAlarm(reading);
  }

  return alerts;
}

function triggerLocalAlarm(reading: SensorReading) {
  // Trigger local HMI alarm
  // Send SMS via local GSM module
  // Activate relay for visual/audio alarm
  console.error("🚨 CRITICAL ALARM:", reading);
}
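
To make alerts visible while offline, one option is to publish them on a local topic that an HMI or the optional Node-RED flow can subscribe to. A sketch (the alerts/<level> topic layout is an assumption):

// src/edge/alert-publish.ts — sketch: surface alerts on the local broker
import mqtt from "mqtt";
import type { Alert } from "./offline-alerts";

const client = mqtt.connect(process.env.MQTT_BROKER ?? "mqtt://localhost:1883");

export function publishAlerts(alerts: Alert[]) {
  for (const alert of alerts) {
    // QoS 1 so alerts survive brief broker reconnects
    client.publish(`alerts/${alert.level}`, JSON.stringify(alert), { qos: 1 });
  }
}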

6. Synchronization

6.1 Cloud Sync Service

// src/edge/cloud-sync.ts
import { getBufferedReadings, clearSyncedReadings } from "./local-buffer";

export class CloudSyncService {
  private syncInterval: NodeJS.Timeout | null = null;
  private isOnline = false;

  async start(intervalMs = 60000) {
    // Check connectivity
    this.isOnline = await this.checkConnectivity();

    // Start sync loop
    this.syncInterval = setInterval(async () => {
      await this.sync();
    }, intervalMs);

    // Initial sync
    await this.sync();
  }

  private async checkConnectivity(): Promise<boolean> {
    try {
      const response = await fetch(`${process.env.CLOUD_API_URL}/health`, {
        method: "GET",
        signal: AbortSignal.timeout(5000), // fetch has no `timeout` option
      });
      return response.ok;
    } catch {
      return false;
    }
  }

  async sync() {
    this.isOnline = await this.checkConnectivity();

    if (!this.isOnline) {
      console.log("Offline - skipping sync");
      return;
    }

    try {
      // Get buffered readings together with their Redis keys
      const entries = await getBufferedReadings(500);

      if (entries.length === 0) return;

      const readings = entries.map((e) => e.reading);

      // Send to cloud
      const response = await fetch(`${process.env.CLOUD_API_URL}/api/iot/batch`, {
        method: "POST",
        headers: {
          "Content-Type": "application/json",
          "x-api-key": process.env.CLOUD_API_KEY!,
        },
        body: JSON.stringify({ readings }),
      });

      if (response.ok) {
        // Clear exactly the entries that were uploaded
        await clearSyncedReadings(entries.map((e) => e.key));
        console.log(`Synced ${readings.length} readings to cloud`);
      }
    } catch (error) {
      console.error("Sync failed:", error);
    }
  }

  stop() {
    if (this.syncInterval) {
      clearInterval(this.syncInterval);
    }
  }
}
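
Typical wiring in the gateway entry point, assuming the SYNC_INTERVAL environment variable from docker-compose.edge.yml:

// src/edge-gateway.ts — sketch of startup/shutdown wiring
import { CloudSyncService } from "./edge/cloud-sync";

const syncService = new CloudSyncService();

async function main() {
  await syncService.start(Number(process.env.SYNC_INTERVAL ?? 60000));
}

// Stop the sync loop cleanly when Docker stops the container
process.on("SIGTERM", () => syncService.stop());

main().catch((err) => {
  console.error("Gateway failed to start:", err);
  process.exit(1);
});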

6.2 Deployment Script

#!/bin/bash
# deploy-edge.sh

EDGE_HOST="192.168.1.100"
EDGE_USER="root"

echo "Deploying to edge device..."

# Build images locally (see the ARM note below if cross-building)
docker-compose -f docker-compose.edge.yml build

# Save and transfer images. Compose usually prefixes built image names
# with the project name (check `docker images`); adjust the names here.
docker save edge-gateway ml-inference | gzip > edge-images.tar.gz
scp edge-images.tar.gz $EDGE_USER@$EDGE_HOST:/tmp/

# Deploy on edge
ssh $EDGE_USER@$EDGE_HOST << 'EOF'
cd /opt/Edubotx-edge
docker load < /tmp/edge-images.tar.gz
docker-compose -f docker-compose.edge.yml up -d
rm /tmp/edge-images.tar.gz
EOF

echo "Edge deployment complete!"

Next Steps


Last Updated: December 2024