Initial commit — Serveur Lucas SmartEye
API réception alertes chute (SmartEye/YOLO), analyse IA (Gemini 2.5 Flash), gestion alertes avec escalade (watchdog), notifications Firebase, dashboard web, documentation MkDocs. Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
266
analyze_errors_and_learn.py
Executable file
266
analyze_errors_and_learn.py
Executable file
@@ -0,0 +1,266 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Système d'apprentissage des erreurs
|
||||
Analyse les logs d'erreurs pour identifier les patterns et proposer des améliorations
|
||||
"""
|
||||
import os
|
||||
import sys
|
||||
import json
|
||||
import re
|
||||
from collections import Counter, defaultdict
|
||||
from datetime import datetime
|
||||
|
||||
def analyze_error_logs(error_log="/tmp/smarteye_gemini_errors.log"):
    """Analyze the Gemini error log and summarize error patterns.

    Args:
        error_log: Path to the error log file. Defaults to the SmartEye
            Gemini log location, so existing callers are unaffected.

    Returns:
        dict: ``{"status": "no_errors", "message": ...}`` when the log file
        is absent, otherwise a ``{"status": "analyzed", ...}`` summary with
        per-type error counts, retry statistics and the most common error.
    """
    if not os.path.exists(error_log):
        return {
            "status": "no_errors",
            "message": "Aucun log d'erreur trouvé - Système sain !"
        }

    with open(error_log, 'r') as f:
        content = f.read()

    # Error signatures to look for (matched case-insensitively).
    patterns = {
        "JSONDecodeError": r"JSONDecodeError",
        "ValueError": r"ValueError",
        "TimeoutError": r"TimeoutError",
        "ConnectionError": r"ConnectionError",
        "API quota": r"quota|rate.?limit",
        "Empty response": r"Réponse vide|empty response",
        "Malformed JSON": r"JSON invalide|malformed",
    }

    # Count occurrences of each error signature in the whole log.
    error_counts = {}
    for name, pattern in patterns.items():
        matches = re.findall(pattern, content, re.IGNORECASE)
        if matches:
            error_counts[name] = len(matches)

    # Retry attempts are logged as "TENTATIVE <n>/<max>" (single digits).
    tentative_pattern = r"TENTATIVE (\d)/(\d)"
    tentatives = re.findall(tentative_pattern, content)

    # Aggregate statistics.
    total_errors = sum(error_counts.values())
    # An attempt whose number is below the maximum is treated as one that
    # did not exhaust the retries — a rough proxy for "retry succeeded".
    retry_success = len([t for t in tentatives if t[0] != t[1]])

    return {
        "status": "analyzed",
        "total_errors": total_errors,
        "error_counts": error_counts,
        "retry_attempts": len(tentatives),
        "retry_success_rate": (retry_success / len(tentatives) * 100) if tentatives else 0,
        "most_common": max(error_counts.items(), key=lambda x: x[1])[0] if error_counts else None
    }
def analyze_image_results(client_folder="/var/www/lucas/clients/Demo_01"):
    """Summarize AI analysis results stored next to client images.

    Each image ``X.jpg`` in the folder may have a sibling ``X.jpg.json``
    file holding the AI verdict: ``urgence`` (bool), and optionally
    ``confiance`` (numeric) and ``message`` (str).

    Args:
        client_folder: Directory containing the ``.jpg`` images and their
            ``.json`` analysis files. Defaults to the Demo_01 client folder,
            so existing callers are unaffected.

    Returns:
        dict with coverage / urgency / confidence statistics, or ``None``
        when the folder does not exist.
    """
    if not os.path.exists(client_folder):
        return None

    # Accumulators for the scan.
    total_images = 0
    with_json = 0
    urgence_count = 0
    false_positive_count = 0
    confidence_scores = []
    messages = defaultdict(int)

    for filename in os.listdir(client_folder):
        if not filename.endswith('.jpg'):
            continue
        total_images += 1
        json_file = os.path.join(client_folder, filename + '.json')

        if not os.path.exists(json_file):
            continue
        with_json += 1
        try:
            with open(json_file, 'r') as f:
                data = json.load(f)

            # Only explicit booleans count; missing/other values are ignored.
            if data.get('urgence') is True:
                urgence_count += 1
            elif data.get('urgence') is False:
                false_positive_count += 1

            if 'confiance' in data:
                confidence_scores.append(data['confiance'])

            # Truncate messages so near-duplicates bucket together.
            msg = data.get('message', 'Unknown')[:50]
            messages[msg] += 1

        # Best-effort scan: skip unreadable or malformed analysis files
        # instead of aborting (was a bare ``except: pass``).
        except (OSError, ValueError, TypeError, KeyError):
            pass

    avg_confidence = sum(confidence_scores) / len(confidence_scores) if confidence_scores else 0

    return {
        "total_images": total_images,
        "analyzed": with_json,
        "coverage": (with_json / total_images * 100) if total_images else 0,
        "urgence_rate": (urgence_count / with_json * 100) if with_json else 0,
        "false_positive_rate": (false_positive_count / with_json * 100) if with_json else 0,
        "avg_confidence": avg_confidence,
        "top_messages": dict(sorted(messages.items(), key=lambda x: x[1], reverse=True)[:5])
    }
def generate_recommendations(error_analysis, image_analysis):
    """Build a prioritized list of recommendations from the analyses.

    Args:
        error_analysis: Result of ``analyze_error_logs()`` (may be falsy).
        image_analysis: Result of ``analyze_image_results()`` (may be None).

    Returns:
        list[dict]: Each item has ``priority``, ``title``, ``description``
        and ``action`` (``action`` is ``None`` when nothing is required).
    """
    recommendations = []

    if not error_analysis or error_analysis["status"] == "no_errors":
        recommendations.append({
            "priority": "✅ SUCCESS",
            "title": "Système stable",
            "description": "Aucune erreur détectée - Le correctif fonctionne parfaitement !",
            "action": None
        })
    else:
        # --- Error-log based recommendations ---
        if error_analysis["total_errors"] > 10:
            recommendations.append({
                "priority": "⚠️ MOYEN",
                "title": f"{error_analysis['total_errors']} erreurs détectées",
                "description": "Taux d'erreur encore élevé",
                "action": "Vérifier la clé API Gemini et les quotas"
            })

        if error_analysis.get("most_common") == "TimeoutError":
            recommendations.append({
                "priority": "⚠️ MOYEN",
                "title": "Timeouts fréquents",
                "description": "L'API Gemini répond lentement",
                "action": "Augmenter le timeout à 60s dans analyze.py"
            })

        # Read the rate once: the original mixed ``.get(..., 0)`` in the
        # condition with direct ``[...]`` indexing in the f-string, which
        # raised KeyError whenever the key was missing.
        retry_rate = error_analysis.get("retry_success_rate", 0)
        # Only warn when retries actually happened — with zero attempts the
        # rate defaults to 0 and previously triggered a spurious alert.
        if error_analysis.get("retry_attempts", 0) > 0 and retry_rate < 50:
            recommendations.append({
                "priority": "🔴 URGENT",
                "title": "Faible taux de succès des retries",
                "description": f"Seulement {retry_rate:.1f}% de succès",
                "action": "Augmenter max_retries à 5 ou vérifier la connexion"
            })

    if image_analysis:
        # --- Image-analysis based recommendations ---
        if image_analysis["coverage"] < 95:
            recommendations.append({
                "priority": "ℹ️ INFO",
                "title": f"Couverture: {image_analysis['coverage']:.1f}%",
                "description": f"{image_analysis['total_images'] - image_analysis['analyzed']} images sans analyse",
                "action": "Exécuter repair_missing_analyses.py si besoin"
            })

        if image_analysis["urgence_rate"] > 50:
            recommendations.append({
                "priority": "⚠️ MOYEN",
                "title": f"Taux d'urgence élevé: {image_analysis['urgence_rate']:.1f}%",
                "description": "Beaucoup de vraies alertes détectées",
                "action": "Vérifier le positionnement des caméras et le prompt IA"
            })

        if image_analysis["avg_confidence"] < 70:
            recommendations.append({
                "priority": "ℹ️ INFO",
                "title": f"Confiance moyenne: {image_analysis['avg_confidence']:.0f}%",
                "description": "L'IA hésite sur certaines analyses",
                "action": "Améliorer le prompt ou utiliser un modèle plus puissant"
            })

    # Fallback: nothing to report means the system is healthy.
    if not recommendations:
        recommendations.append({
            "priority": "✅ SUCCESS",
            "title": "Système optimal",
            "description": "Aucune amélioration nécessaire",
            "action": None
        })

    return recommendations
def main():
    """Run the full error/image analysis, print a summary, save a report.

    Reads the Gemini error log and the client image folder, prints a
    human-readable summary with recommendations, then persists the full
    report as JSON to ``/var/www/lucas/last_analysis_report.json``.
    """
    print("="*70)
    print("ANALYSE DES ERREURS ET APPRENTISSAGE")
    print("="*70)
    print()

    # --- Error-log analysis ------------------------------------------
    print("📊 ANALYSE DES LOGS D'ERREURS")
    print("-"*70)
    error_analysis = analyze_error_logs()

    if error_analysis["status"] == "no_errors":
        print("✅ Aucune erreur trouvée - Système parfaitement stable !")
    else:
        print(f"Total erreurs: {error_analysis['total_errors']}")
        print(f"Tentatives de retry: {error_analysis['retry_attempts']}")
        print(f"Taux de succès retry: {error_analysis.get('retry_success_rate', 0):.1f}%")
        print()
        print("Types d'erreurs:")
        # Most frequent error types first.
        for error_type, count in sorted(error_analysis['error_counts'].items(), key=lambda x: x[1], reverse=True):
            print(f" • {error_type}: {count}")

    # --- Image-analysis summary --------------------------------------
    print()
    print("📸 ANALYSE DES IMAGES")
    print("-"*70)
    image_analysis = analyze_image_results()

    if image_analysis:
        print(f"Total images: {image_analysis['total_images']}")
        print(f"Analysées: {image_analysis['analyzed']} ({image_analysis['coverage']:.1f}%)")
        print(f"Taux d'urgence: {image_analysis['urgence_rate']:.1f}%")
        print(f"Fausses alertes: {image_analysis['false_positive_rate']:.1f}%")
        print(f"Confiance moyenne: {image_analysis['avg_confidence']:.0f}%")
        print()
        print("Messages les plus fréquents:")
        for msg, count in list(image_analysis['top_messages'].items())[:3]:
            print(f" • {msg}: {count}x")
    else:
        print("⚠️ Dossier client introuvable")

    # --- Recommendations ---------------------------------------------
    print()
    print("💡 RECOMMANDATIONS")
    print("="*70)

    recommendations = generate_recommendations(error_analysis, image_analysis)

    # (The index from enumerate() in the original was never used.)
    for rec in recommendations:
        print(f"\n{rec['priority']} {rec['title']}")
        print(f" {rec['description']}")
        if rec['action']:
            print(f" → Action: {rec['action']}")

    print()
    print("="*70)

    # Persist the machine-readable report for later inspection.
    report = {
        "timestamp": datetime.now().isoformat(),
        "error_analysis": error_analysis,
        "image_analysis": image_analysis,
        "recommendations": recommendations
    }

    report_file = "/var/www/lucas/last_analysis_report.json"
    # UTF-8 + ensure_ascii=False keeps the French text readable in the
    # saved file regardless of the system locale.
    with open(report_file, 'w', encoding='utf-8') as f:
        json.dump(report, f, indent=2, ensure_ascii=False)

    print(f"📄 Rapport sauvegardé: {report_file}")
    print()
if __name__ == "__main__":
|
||||
try:
|
||||
main()
|
||||
except KeyboardInterrupt:
|
||||
print("\n\n⚠️ Interruption")
|
||||
sys.exit(1)
|
||||
except Exception as e:
|
||||
print(f"\n❌ Erreur: {type(e).__name__}: {e}")
|
||||
import traceback
|
||||
traceback.print_exc()
|
||||
sys.exit(1)
|
||||
Reference in New Issue
Block a user