API réception alertes chute (SmartEye/YOLO), analyse IA (Gemini 2.5 Flash), gestion alertes avec escalade (watchdog), notifications Firebase, dashboard web, documentation MkDocs. Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
328 lines | 11 KiB | Python
"""
|
|
SmartEye SENTINEL - Gestionnaire d'Alertes avec Acquittement
|
|
=============================================================
|
|
Gère le cycle de vie complet d'une alerte :
|
|
PENDING → DELIVERED → SEEN → ACKNOWLEDGED
|
|
|
|
Tant qu'une alerte n'est pas au minimum SEEN, le watchdog la renverra.
|
|
|
|
Auteur : Unigest Solutions / SmartEye V30
|
|
"""
|
|
|
|
import json
|
|
import os
|
|
import time
|
|
import fcntl
|
|
from datetime import datetime, timedelta
|
|
from enum import Enum
|
|
|
|
# --- CONFIGURATION ---
ALERTS_DIR = "/var/www/lucas/alerts"                              # base directory for all alert files
ALERTS_DB = os.path.join(ALERTS_DIR, "alerts_active.json")        # active alerts: JSON dict keyed by alert_id
ALERTS_HISTORY = os.path.join(ALERTS_DIR, "alerts_history.json")  # archive of finished alerts (JSON list)
LOCK_FILE = os.path.join(ALERTS_DIR, ".alerts.lock")              # flock file guarding writes to ALERTS_DB

# Escalation strategy: each tier fires once the alert is `delay` seconds old
# (seconds counted from alert creation); labels/priorities are sent as-is
# to the notification layer.
ESCALATION_SCHEDULE = [
    {"delay": 120, "label": "Rappel 1", "priority": "high"},
    {"delay": 300, "label": "Rappel 2", "priority": "critical"},
    {"delay": 600, "label": "Rappel 3", "priority": "critical"},
    {"delay": 1200, "label": "Rappel URGENT", "priority": "emergency"},
    {"delay": 1800, "label": "ESCALADE MAX", "priority": "emergency"},
]

# Maximum age (seconds) before an alert is considered expired (2h)
ALERT_MAX_AGE = 7200
|
|
|
|
|
|
class AlertStatus:
    """Lifecycle states of an alert.

    Stored as plain strings in the JSON store so they round-trip through
    json.dump/json.load unchanged. Normal progression:
    PENDING → DELIVERED → SEEN → ACKNOWLEDGED → RESOLVED (EXPIRED on timeout).
    """
    PENDING = "PENDING"              # created, first notification sent
    DELIVERED = "DELIVERED"          # Firebase confirmed delivery
    SEEN = "SEEN"                    # app was opened, alert displayed
    ACKNOWLEDGED = "ACKNOWLEDGED"    # user tapped "I saw it" / "I'm calling"
    RESOLVED = "RESOLVED"            # situation handled (false alarm or help dispatched)
    EXPIRED = "EXPIRED"              # timed out with no response (after max escalation)
|
|
|
|
|
|
def _ensure_dir():
    """Make sure the alerts directory exists before any file access."""
    if not os.path.isdir(ALERTS_DIR):
        os.makedirs(ALERTS_DIR, exist_ok=True)
|
|
|
|
|
|
def _load_alerts():
    """Load the active alerts from ALERTS_DB.

    NOTE: reads are deliberately NOT protected by the flock taken in
    _save_alerts(); a torn or concurrent read falls into the same fallback
    as a missing file. (The previous docstring claimed a lock was held —
    it never was.)

    Returns:
        dict: alert_id -> alert record; {} when the file is missing,
        unreadable, or contains invalid JSON.
    """
    _ensure_dir()
    # EAFP: open directly instead of exists()-then-open, which races with
    # a concurrent writer replacing the file.
    try:
        with open(ALERTS_DB, 'r') as f:
            return json.load(f)
    except (json.JSONDecodeError, OSError):
        # covers FileNotFoundError (subclass of OSError) and IOError (alias)
        return {}
|
|
|
|
|
|
def _save_alerts(alerts):
    """Persist the active alerts under an exclusive flock.

    The JSON is written to a temporary file and atomically renamed over
    ALERTS_DB via os.replace(); since readers (_load_alerts) do not take
    the lock, this guarantees they can never observe a half-written file,
    which the previous in-place open(..., 'w') could produce on a crash.

    Args:
        alerts: dict alert_id -> alert record.
    """
    _ensure_dir()
    lock_fd = None
    try:
        lock_fd = open(LOCK_FILE, 'w')
        fcntl.flock(lock_fd, fcntl.LOCK_EX)

        tmp_path = ALERTS_DB + ".tmp"
        with open(tmp_path, 'w') as f:
            json.dump(alerts, f, indent=2, default=str)
        os.replace(tmp_path, ALERTS_DB)  # atomic rename on POSIX
    finally:
        if lock_fd:
            fcntl.flock(lock_fd, fcntl.LOCK_UN)
            lock_fd.close()
|
|
|
|
|
|
def _archive_alert(alert):
    """Append a finished alert to the history file (best effort).

    Keeps at most the 500 most recent entries. A corrupt or unreadable
    history file resets the history rather than crashing, so the finished
    alert is never lost to an archiving error.

    Args:
        alert: the alert record to archive; mutated in place to add an
            "archived_at" timestamp.
    """
    _ensure_dir()
    history = []
    if os.path.exists(ALERTS_HISTORY):
        try:
            with open(ALERTS_HISTORY, 'r') as f:
                history = json.load(f)
        except (json.JSONDecodeError, OSError):
            # was a bare `except:` — narrowed so KeyboardInterrupt/SystemExit
            # and programming errors are no longer swallowed
            history = []

    alert["archived_at"] = datetime.now().isoformat()
    history.append(alert)

    # Cap the history at the 500 most recent alerts
    if len(history) > 500:
        history = history[-500:]

    with open(ALERTS_HISTORY, 'w') as f:
        json.dump(history, f, indent=2, default=str)
|
|
|
|
|
|
# =============================================
|
|
# API PUBLIQUE
|
|
# =============================================
|
|
|
|
def creer_alerte(alert_id, image_path, camera_id=None, audio_relay_ws=None, camera_ip_map=None, analyse_data=None):
    """Register a new fall alert in the PENDING state.

    Called by analyze.py right after a fall is detected; the first
    Firebase notification has already been sent by the caller, which is
    why the record starts with notification_count == 1.

    Returns:
        dict: the alert record that was stored.
    """
    now_iso = datetime.now().isoformat()

    initial_events = [
        {
            "timestamp": now_iso,
            "event": "CREATED",
            "detail": "Alerte créée suite à détection de chute"
        },
        {
            "timestamp": now_iso,
            "event": "NOTIFICATION_SENT",
            "detail": "Première notification Firebase envoyée"
        }
    ]

    alerte = {
        "alert_id": alert_id,
        "status": AlertStatus.PENDING,
        "image_path": image_path,
        "camera_id": camera_id or "cam_default",
        "audio_relay_ws": audio_relay_ws or "ws://57.128.74.87:8800",
        "camera_ip_map": camera_ip_map or "{}",
        "created_at": now_iso,
        "updated_at": now_iso,
        "notification_count": 1,  # first notification already sent by analyze.py
        "last_notification_at": now_iso,
        "escalation_level": 0,
        "analyse": analyse_data or {},
        "events": initial_events
    }

    active = _load_alerts()
    active[alert_id] = alerte
    _save_alerts(active)

    return alerte
|
|
|
|
|
|
def mettre_a_jour_statut(alert_id, new_status, detail=None):
    """Update the status of an active alert.

    Called by the PHP endpoint when the app sends an ACK. Transitions may
    only move forward through the lifecycle (ACKNOWLEDGED never drops back
    to SEEN); statuses outside the known ordering — notably EXPIRED — are
    always let through. RESOLVED/EXPIRED alerts are archived to the
    history file and removed from the active set.

    Args:
        alert_id: identifier of the alert to update.
        new_status: target AlertStatus value.
        detail: optional free text appended to the event-log entry.

    Returns:
        bool: True on success; False when the alert is unknown or the
        transition would be a status regression.
    """
    alerts = _load_alerts()

    if alert_id not in alerts:
        return False

    now = datetime.now()
    alerte = alerts[alert_id]
    old_status = alerte["status"]

    # Forward-only ordering of the lifecycle. EXPIRED is intentionally
    # absent: index() raises ValueError for it, which falls through below.
    status_order = [
        AlertStatus.PENDING,
        AlertStatus.DELIVERED,
        AlertStatus.SEEN,
        AlertStatus.ACKNOWLEDGED,
        AlertStatus.RESOLVED
    ]

    try:
        old_idx = status_order.index(old_status)
        new_idx = status_order.index(new_status)
        if new_idx < old_idx and new_status != AlertStatus.EXPIRED:
            return False  # no status regression
    except ValueError:
        pass  # status not in the ordering (e.g. EXPIRED): let it through

    alerte["status"] = new_status
    alerte["updated_at"] = now.isoformat()
    alerte["events"].append({
        "timestamp": now.isoformat(),
        # was f"STATUS_CHANGED" — a pointless f-string with no placeholders
        "event": "STATUS_CHANGED",
        "detail": f"{old_status} → {new_status}" + (f" | {detail}" if detail else "")
    })

    # Finished alerts leave the active set and go to the history file.
    if new_status in (AlertStatus.RESOLVED, AlertStatus.EXPIRED):
        _archive_alert(alerte)
        del alerts[alert_id]
    else:
        alerts[alert_id] = alerte

    _save_alerts(alerts)
    return True
|
|
|
|
|
|
def get_alertes_pending():
    """
    Return every active alert that is due for a reminder notification.

    Used by the watchdog (cron). Side effect: alerts older than
    ALERT_MAX_AGE are marked EXPIRED (and thereby archived) as they are
    scanned.

    Returns: list of dicts with keys alert_id, alerte, escalation_level,
    escalation_label, priority, age_seconds, notification_count — one
    entry per alert whose next escalation tier has been reached.
    """
    alerts = _load_alerts()
    now = datetime.now()
    a_renvoyer = []

    for alert_id, alerte in alerts.items():
        # Only PENDING/DELIVERED alerts need resending; SEEN and beyond
        # mean someone already has the alert on screen.
        if alerte["status"] not in (AlertStatus.PENDING, AlertStatus.DELIVERED):
            continue

        created_at = datetime.fromisoformat(alerte["created_at"])
        age_seconds = (now - created_at).total_seconds()

        # Hard timeout: expire alerts older than ALERT_MAX_AGE (2h).
        if age_seconds > ALERT_MAX_AGE:
            mettre_a_jour_statut(alert_id, AlertStatus.EXPIRED, "Timeout 2h sans réponse")
            continue

        # Tier last recorded by enregistrer_envoi() (0 right after creation).
        current_level = alerte.get("escalation_level", 0)

        # Find the next escalation tier whose delay has elapsed.
        # NOTE(review): with the initial escalation_level of 0, the
        # `i <= current_level` guard also skips tier 0 ("Rappel 1", 120 s),
        # so the first reminder actually fired is tier 1 at 300 s —
        # confirm this is intended (looks like an off-by-one).
        for i, palier in enumerate(ESCALATION_SCHEDULE):
            if i <= current_level:
                continue  # tier already passed

            if age_seconds >= palier["delay"]:
                # Anti-spam guard: at least 60 s between two sends.
                last_notif = datetime.fromisoformat(alerte["last_notification_at"])
                if (now - last_notif).total_seconds() < 60:
                    continue

                a_renvoyer.append({
                    "alert_id": alert_id,
                    "alerte": alerte,
                    "escalation_level": i,
                    "escalation_label": palier["label"],
                    "priority": palier["priority"],
                    "age_seconds": int(age_seconds),
                    "notification_count": alerte["notification_count"]
                })
                break  # at most one tier per watchdog cycle

    return a_renvoyer
|
|
|
|
|
|
def enregistrer_envoi(alert_id, escalation_level):
    """Record that a reminder notification went out for an alert.

    Called by the watchdog right after resending. Bumps the send counter,
    stores the escalation tier reached and appends an event-log entry.

    Returns:
        bool: False when the alert is no longer active, True otherwise.
    """
    active = _load_alerts()
    alerte = active.get(alert_id)
    if alerte is None:
        return False

    stamp = datetime.now().isoformat()
    count = alerte.get("notification_count", 0) + 1

    alerte["notification_count"] = count
    alerte["last_notification_at"] = stamp
    alerte["escalation_level"] = escalation_level
    alerte["updated_at"] = stamp
    alerte["events"].append({
        "timestamp": stamp,
        "event": "NOTIFICATION_RESENT",
        "detail": f"Rappel niveau {escalation_level} (envoi #{count})"
    })

    active[alert_id] = alerte
    _save_alerts(active)
    return True
|
|
|
|
|
|
def get_alerte(alert_id):
    """Look up a single active alert by its ID; None when not found."""
    return _load_alerts().get(alert_id)
|
|
|
|
|
|
def get_toutes_alertes_actives():
    """Snapshot of every active alert, keyed by alert_id (debug/monitoring)."""
    active = _load_alerts()
    return active
|
|
|
|
|
|
def compter_alertes_actives():
    """Count alerts still awaiting a reaction (anti-spam check for analyze.py)."""
    needs_action = (AlertStatus.PENDING, AlertStatus.DELIVERED)
    return sum(1 for a in _load_alerts().values() if a["status"] in needs_action)
|
|
|
|
|
|
def nettoyer_alertes_obsoletes(max_age_seen=14400, max_age_ack=7200):
    """
    Clean up stale SEEN/ACKNOWLEDGED alerts.

    Called by the watchdog on each cycle.

    - SEEN > 4h → EXPIRED (nobody reacted)
    - ACKNOWLEDGED > 2h → RESOLVED (situation handled)
    - PENDING > ALERT_MAX_AGE → EXPIRED (safety net; normally handled by
      get_alertes_pending())

    Args:
        max_age_seen: seconds before a SEEN alert auto-expires (default 4h).
        max_age_ack: seconds before an ACKNOWLEDGED alert auto-resolves (default 2h).

    Returns: number of alerts cleaned up
    """
    alerts = _load_alerts()
    now = datetime.now()
    cleaned = 0

    # Iterate over a copy of the keys: mettre_a_jour_statut() reloads and
    # rewrites the store, deleting RESOLVED/EXPIRED entries as it goes, so
    # the local `alerts` dict is only used as a read-only snapshot here.
    for alert_id in list(alerts.keys()):
        alerte = alerts[alert_id]
        created = datetime.fromisoformat(alerte["created_at"])
        age = (now - created).total_seconds()

        if alerte["status"] == AlertStatus.SEEN and age > max_age_seen:
            mettre_a_jour_statut(alert_id, AlertStatus.EXPIRED, f"Auto-expiré après {int(age//3600)}h sans acquittement")
            cleaned += 1
        elif alerte["status"] == AlertStatus.ACKNOWLEDGED and age > max_age_ack:
            mettre_a_jour_statut(alert_id, AlertStatus.RESOLVED, f"Auto-résolu après acquittement ({int(age//3600)}h)")
            cleaned += 1
        elif alerte["status"] == AlertStatus.PENDING and age > ALERT_MAX_AGE:
            # NOTE: DELIVERED alerts are not covered by any of these
            # branches; they only expire via get_alertes_pending().
            mettre_a_jour_statut(alert_id, AlertStatus.EXPIRED, f"Timeout {int(age//3600)}h sans réponse")
            cleaned += 1

    return cleaned