@mytec: feat: Phase 3.0 Architecture Refactor

Major refactoring of RFCP backend:
- Modular propagation models (8 models)
- SharedMemoryManager for terrain data
- ProcessPoolExecutor parallel processing
- WebSocket progress streaming
- Building filtering pipeline (351k → 15k)
- 82 unit tests

Performance: Standard preset 38s → 5s (7.6x speedup)

Known issue: Detailed preset times out (fix planned for 3.1.0)
This commit is contained in:
2026-02-01 23:12:26 +02:00
parent 1dde56705a
commit defa3ad440
71 changed files with 7134 additions and 256 deletions

View File

@@ -11,6 +11,7 @@ from app.services.coverage_service import (
CoveragePoint,
apply_preset,
PRESETS,
select_propagation_model,
)
from app.services.parallel_coverage_service import CancellationToken
@@ -58,6 +59,12 @@ async def calculate_coverage(request: CoverageRequest) -> CoverageResponse:
effective_settings = apply_preset(request.settings.model_copy())
models_used = _get_active_models(effective_settings)
# Add the selected propagation model for the first site's frequency
env = getattr(effective_settings, 'environment', 'urban')
primary_model = select_propagation_model(request.sites[0].frequency, env)
if primary_model.name not in models_used:
models_used.insert(0, primary_model.name)
# Time the calculation
start_time = time.time()
cancel_token = CancellationToken()
@@ -182,7 +189,7 @@ async def get_buildings(
def _get_active_models(settings: CoverageSettings) -> List[str]:
"""Determine which propagation models are active"""
models = ["okumura_hata"] # Always active as base model
models = [] # Base propagation model added by caller via select_propagation_model()
if settings.use_terrain:
models.append("terrain_los")

View File

@@ -46,15 +46,29 @@ async def get_system_info():
}
@router.get("/models")
async def get_propagation_models():
    """Return available propagation models and their valid ranges."""
    # Imported lazily so the route module loads without pulling in the engine.
    from app.core.engine import engine

    available = engine.get_available_models()
    return {"models": available}
@router.post("/shutdown")
async def shutdown():
    """Graceful shutdown endpoint. Kills worker processes then self-terminates.

    Electron calls this first, waits briefly, then does a PID-tree kill.
    The delayed ``os._exit`` is only a safety net in case Electron never
    kills this process.

    Returns:
        dict with the shutdown status and the number of workers killed.
    """
    # Imported lazily to avoid a circular import at module load time.
    from app.services.parallel_coverage_service import _kill_worker_processes

    killed = _kill_worker_processes()
    # Safety net: self-terminate after 3s if Electron doesn't kill us.
    # The delay is long enough for Electron to do its PID-tree kill first
    # (preferred), and for the HTTP response below to be flushed.
    loop = asyncio.get_running_loop()
    loop.call_later(3.0, lambda: os._exit(0))
    return {"status": "shutting down", "workers_killed": killed}