Major refactoring of RFCP backend: - Modular propagation models (8 models) - SharedMemoryManager for terrain data - ProcessPoolExecutor parallel processing - WebSocket progress streaming - Building filtering pipeline (351k → 15k) - 82 unit tests Performance: Standard preset 38s → 5s (7.6x speedup) Known issue: Detailed preset timeout (fix in 3.1.0)
75 lines
2.1 KiB
Python
75 lines
2.1 KiB
Python
import os
|
|
import asyncio
|
|
import multiprocessing as mp
|
|
from fastapi import APIRouter
|
|
|
|
# Module-level router for the system endpoints defined below
# (presumably included by the main FastAPI app — confirm against app setup).
router = APIRouter()
|
|
|
|
|
|
@router.get("/info")
async def get_system_info():
    """Return system info: CPU cores, GPU availability, parallel backend.

    Returns a dict with:
        cpu_cores: number of logical CPU cores (at least 1).
        parallel_workers: cores capped at ``max_parallel_workers``.
        parallel_backend: "ray" | "process_pool" | "sequential".
        ray_available / ray_initialized: best-effort Ray probe results.
        gpu / gpu_available: info reported by ``gpu_service``.
    """
    # Named cap instead of a bare inline "14"; presumably tuned for the
    # coverage workers — TODO confirm against parallel_coverage_service.
    max_parallel_workers = 14

    # mp.cpu_count() may return None on exotic platforms; fall back to 1.
    cpu_cores = mp.cpu_count() or 1

    # Best-effort Ray probe: any import/initialization error simply reports
    # Ray as unavailable instead of failing the whole endpoint.
    ray_available = False
    ray_initialized = False
    try:
        from app.services.parallel_coverage_service import RAY_AVAILABLE

        ray_available = RAY_AVAILABLE
        if ray_available:
            import ray

            ray_initialized = ray.is_initialized()
    except Exception:
        pass

    # GPU status comes from the shared gpu_service singleton.
    from app.services.gpu_service import gpu_service

    gpu_info = gpu_service.get_info()

    # Prefer Ray when present; otherwise a process pool if more than one
    # core is available, else sequential execution.
    if ray_available:
        parallel_backend = "ray"
    elif cpu_cores > 1:
        parallel_backend = "process_pool"
    else:
        parallel_backend = "sequential"

    return {
        "cpu_cores": cpu_cores,
        "parallel_workers": min(cpu_cores, max_parallel_workers),
        "parallel_backend": parallel_backend,
        "ray_available": ray_available,
        "ray_initialized": ray_initialized,
        "gpu": gpu_info,
        "gpu_available": gpu_info.get("available", False),
    }
|
|
|
|
|
|
@router.get("/models")
async def get_propagation_models():
    """List the available propagation models together with their valid ranges."""
    # Deferred import keeps the engine out of module import time.
    from app.core.engine import engine

    return {"models": engine.get_available_models()}
|
|
|
|
|
|
@router.post("/shutdown")
async def shutdown():
    """Gracefully shut down: kill worker processes, then self-terminate.

    Electron calls this endpoint first, waits briefly, and then performs a
    PID-tree kill. The delayed ``os._exit(0)`` (after 3 seconds) is only a
    safety net in case Electron never kills this process.
    """
    from app.services.parallel_coverage_service import _kill_worker_processes

    worker_count = _kill_worker_processes()

    # Schedule self-termination in 3 s — long enough for Electron's
    # preferred PID-tree kill to happen first.
    asyncio.get_running_loop().call_later(3.0, lambda: os._exit(0))

    return {"status": "shutting down", "workers_killed": worker_count}
|