@mytec: feat: Phase 3.0 Architecture Refactor

Major refactoring of RFCP backend:
- Modular propagation models (8 models)
- SharedMemoryManager for terrain data
- ProcessPoolExecutor parallel processing
- WebSocket progress streaming
- Building filtering pipeline (351k → 15k)
- 82 unit tests

Performance: Standard preset 38s → 5s (7.6x speedup)

Known issue: the Detailed preset can still exceed the calculation timeout; fix planned for 3.1.0
This commit is contained in:
2026-02-01 23:12:26 +02:00
parent 1dde56705a
commit defa3ad440
71 changed files with 7134 additions and 256 deletions

View File

@@ -11,6 +11,7 @@ from app.services.coverage_service import (
CoveragePoint,
apply_preset,
PRESETS,
select_propagation_model,
)
from app.services.parallel_coverage_service import CancellationToken
@@ -58,6 +59,12 @@ async def calculate_coverage(request: CoverageRequest) -> CoverageResponse:
effective_settings = apply_preset(request.settings.model_copy())
models_used = _get_active_models(effective_settings)
# Add the selected propagation model for the first site's frequency
env = getattr(effective_settings, 'environment', 'urban')
primary_model = select_propagation_model(request.sites[0].frequency, env)
if primary_model.name not in models_used:
models_used.insert(0, primary_model.name)
# Time the calculation
start_time = time.time()
cancel_token = CancellationToken()
@@ -182,7 +189,7 @@ async def get_buildings(
def _get_active_models(settings: CoverageSettings) -> List[str]:
"""Determine which propagation models are active"""
models = ["okumura_hata"] # Always active as base model
models = [] # Base propagation model added by caller via select_propagation_model()
if settings.use_terrain:
models.append("terrain_los")

View File

@@ -46,15 +46,29 @@ async def get_system_info():
}
@router.get("/models")
async def get_propagation_models():
    """List the registered propagation models together with their valid ranges."""
    # Imported lazily inside the handler (module-level import would pull in
    # the whole engine at app startup).
    from app.core.engine import engine

    available = engine.get_available_models()
    return {"models": available}
@router.post("/shutdown")
async def shutdown():
    """Graceful shutdown endpoint. Kills worker processes then self-terminates.

    Electron calls this first, waits briefly, then does a PID-tree kill.
    The delayed os._exit is only a safety net in case Electron never kills us.
    """
    from app.services.parallel_coverage_service import _kill_worker_processes

    killed = _kill_worker_processes()
    # Safety net: self-terminate 3s after the response is sent. The delay is
    # long enough for Electron to perform its PID-tree kill first (preferred).
    # Exactly ONE exit timer is scheduled — an earlier second timer (the old
    # 0.5s one) would fire first and defeat the 3s grace period.
    loop = asyncio.get_running_loop()
    loop.call_later(3.0, lambda: os._exit(0))
    return {"status": "shutting down", "workers_killed": killed}

View File

@@ -0,0 +1,261 @@
"""
WebSocket handler for real-time coverage calculation with progress.
Uses the same coverage_service pipeline as the HTTP endpoint but sends
progress updates during computation phases.
"""
import time
import asyncio
import threading
from typing import Optional
from fastapi import WebSocket, WebSocketDisconnect
from app.services.coverage_service import (
coverage_service, SiteParams, CoverageSettings, apply_preset,
select_propagation_model,
)
from app.services.parallel_coverage_service import CancellationToken
class ConnectionManager:
    """Registry of per-calculation cancellation tokens plus WS send helpers.

    Every send helper swallows transport errors: a dropped socket must
    never crash a running calculation.
    """

    def __init__(self):
        # calculation id -> its CancellationToken
        self._cancel_tokens: dict[str, CancellationToken] = {}

    async def _safe_send(self, ws: WebSocket, payload: dict):
        """Send one JSON payload, ignoring any transport failure."""
        try:
            await ws.send_json(payload)
        except Exception:
            pass

    async def send_progress(
        self, ws: WebSocket, calc_id: str,
        phase: str, progress: float, eta: Optional[float] = None,
    ):
        """Emit a progress event; progress is clamped to at most 1.0."""
        await self._safe_send(ws, {
            "type": "progress",
            "calculation_id": calc_id,
            "phase": phase,
            "progress": min(progress, 1.0),
            "eta_seconds": eta,
        })

    async def send_result(self, ws: WebSocket, calc_id: str, result: dict):
        """Emit the final result payload for a calculation."""
        await self._safe_send(ws, {
            "type": "result",
            "calculation_id": calc_id,
            "data": result,
        })

    async def send_error(self, ws: WebSocket, calc_id: str, error: str):
        """Emit an error message for a calculation."""
        await self._safe_send(ws, {
            "type": "error",
            "calculation_id": calc_id,
            "message": error,
        })


ws_manager = ConnectionManager()
async def _run_calculation(ws: WebSocket, calc_id: str, data: dict):
    """Run coverage calculation with progress updates via WebSocket.

    Mirrors the HTTP endpoint: same request payload shape, same validation
    limits, same response format — but streams progress events while the
    calculation runs and supports cooperative cancellation through the
    token registered in ws_manager._cancel_tokens[calc_id].
    """
    cancel_token = CancellationToken()
    # Register the token so a later {"type": "cancel"} message can find it.
    ws_manager._cancel_tokens[calc_id] = cancel_token
    try:
        sites_data = data.get("sites", [])
        settings_data = data.get("settings", {})
        # -- Input validation: same limits as the HTTP endpoint --
        if not sites_data:
            await ws_manager.send_error(ws, calc_id, "At least one site required")
            return
        if len(sites_data) > 10:
            await ws_manager.send_error(ws, calc_id, "Maximum 10 sites per request")
            return
        # Parse sites and settings (same format as HTTP endpoint)
        sites = [SiteParams(**s) for s in sites_data]
        settings = CoverageSettings(**settings_data)
        if settings.radius > 50000:
            await ws_manager.send_error(ws, calc_id, "Maximum radius 50km")
            return
        if settings.resolution < 50:
            await ws_manager.send_error(ws, calc_id, "Minimum resolution 50m")
            return
        effective_settings = apply_preset(settings.model_copy())
        # Determine models used
        from app.api.routes.coverage import _get_active_models
        models_used = _get_active_models(effective_settings)
        env = getattr(effective_settings, 'environment', 'urban')
        primary_model = select_propagation_model(sites[0].frequency, env)
        if primary_model.name not in models_used:
            models_used.insert(0, primary_model.name)
        await ws_manager.send_progress(ws, calc_id, "Initializing", 0.05)
        # ── Bridge sync progress_fn → async WS sends ──
        # progress_fn is called from two contexts:
        # 1. Event loop thread (phases 1-2.5, directly in calculate_coverage)
        # 2. Worker threads (phase 3, from ProcessPool/sequential executors)
        # We detect which thread we're on and use the appropriate method.
        loop = asyncio.get_running_loop()
        event_loop_thread_id = threading.current_thread().ident
        progress_queue: asyncio.Queue = asyncio.Queue()

        def sync_progress_fn(phase: str, pct: float, _eta: Optional[float] = None):
            """Thread-safe progress callback for coverage_service."""
            if threading.current_thread().ident == event_loop_thread_id:
                # From event loop thread: put directly to queue
                progress_queue.put_nowait((phase, pct))
            else:
                # From worker thread: use thread-safe bridge to wake event loop
                loop.call_soon_threadsafe(progress_queue.put_nowait, (phase, pct))

        # Background task: drain queue and send WS progress messages.
        # A ("done", 1.0) sentinel is queued on every exit path so the sender
        # wakes immediately instead of waiting out the 0.5s poll timeout.
        _sender_done = False

        async def progress_sender():
            nonlocal _sender_done
            last_pct = 0.0
            while not _sender_done:
                try:
                    phase, pct = await asyncio.wait_for(progress_queue.get(), timeout=0.5)
                    if pct >= 1.0:
                        break
                    # Throttle: only send if progress changed meaningfully
                    if pct - last_pct >= 0.02 or phase != "Calculating coverage":
                        await ws_manager.send_progress(ws, calc_id, phase, pct)
                        last_pct = pct
                except asyncio.TimeoutError:
                    continue
                except Exception:
                    break

        progress_task = asyncio.create_task(progress_sender())
        # Run calculation with timeout
        start_time = time.time()
        try:
            if len(sites) == 1:
                # NOTE(review): passes the raw `settings`, not
                # `effective_settings` — confirm the preset is re-applied
                # inside coverage_service, otherwise presets are ignored here.
                points = await asyncio.wait_for(
                    coverage_service.calculate_coverage(
                        sites[0], settings, cancel_token,
                        progress_fn=sync_progress_fn,
                    ),
                    timeout=300.0,
                )
            else:
                # Multi-site path has no progress_fn hook.
                points = await asyncio.wait_for(
                    coverage_service.calculate_multi_site_coverage(
                        sites, settings, cancel_token,
                    ),
                    timeout=300.0,
                )
        except asyncio.TimeoutError:
            cancel_token.cancel()
            _sender_done = True
            progress_queue.put_nowait(("done", 1.0))
            await progress_task
            # Hard-kill any stuck worker processes before reporting failure.
            from app.services.parallel_coverage_service import _kill_worker_processes
            _kill_worker_processes()
            await ws_manager.send_error(ws, calc_id, "Calculation timeout (5 min)")
            return
        except asyncio.CancelledError:
            cancel_token.cancel()
            _sender_done = True
            progress_queue.put_nowait(("done", 1.0))
            await progress_task
            await ws_manager.send_error(ws, calc_id, "Calculation cancelled")
            return
        # Stop progress sender
        _sender_done = True
        progress_queue.put_nowait(("done", 1.0))
        await progress_task
        computation_time = time.time() - start_time
        # Build response (identical format to HTTP endpoint)
        rsrp_values = [p.rsrp for p in points]
        los_count = sum(1 for p in points if p.has_los)
        stats = {
            "min_rsrp": min(rsrp_values) if rsrp_values else 0,
            "max_rsrp": max(rsrp_values) if rsrp_values else 0,
            "avg_rsrp": sum(rsrp_values) / len(rsrp_values) if rsrp_values else 0,
            "los_percentage": (los_count / len(points) * 100) if points else 0,
            "points_with_buildings": sum(1 for p in points if p.building_loss > 0),
            "points_with_terrain_loss": sum(1 for p in points if p.terrain_loss > 0),
            "points_with_reflection_gain": sum(1 for p in points if p.reflection_gain > 0),
            "points_with_vegetation_loss": sum(1 for p in points if p.vegetation_loss > 0),
            "points_with_rain_loss": sum(1 for p in points if p.rain_loss > 0),
            "points_with_indoor_loss": sum(1 for p in points if p.indoor_loss > 0),
            "points_with_atmospheric_loss": sum(1 for p in points if p.atmospheric_loss > 0),
        }
        result = {
            "points": [p.model_dump() for p in points],
            "count": len(points),
            "settings": effective_settings.model_dump(),
            "stats": stats,
            "computation_time": round(computation_time, 2),
            "models_used": models_used,
        }
        await ws_manager.send_result(ws, calc_id, result)
    except Exception as e:
        # Stop progress sender on unhandled exception. The inner try/excepts
        # also absorb the NameError case where the failure happened before
        # progress_queue/progress_task were created (e.g. during validation).
        _sender_done = True
        try:
            progress_queue.put_nowait(("done", 1.0))
        except Exception:
            pass
        try:
            await progress_task
        except Exception:
            pass
        await ws_manager.send_error(ws, calc_id, str(e))
    finally:
        # Always deregister the token, including on early validation returns.
        ws_manager._cancel_tokens.pop(calc_id, None)
async def websocket_endpoint(websocket: WebSocket):
    """WebSocket endpoint for coverage calculations with progress.

    Protocol (JSON messages from the client):
      {"type": "calculate", "id": ..., "sites": ..., "settings": ...}
          -> spawns a background calculation task
      {"type": "cancel", "id": ...} -> cancels that calculation
      {"type": "ping"}              -> answered with {"type": "pong"}
    """
    await websocket.accept()
    # Track only the calculations started by THIS connection.
    # ws_manager._cancel_tokens is a process-wide registry shared by every
    # client; cancelling all of its tokens on disconnect (the old behavior)
    # would abort calculations owned by other, still-connected clients.
    own_calc_ids: set = set()
    try:
        while True:
            data = await websocket.receive_json()
            msg_type = data.get("type")
            if msg_type == "calculate":
                calc_id = data.get("id", "")
                own_calc_ids.add(calc_id)
                asyncio.create_task(_run_calculation(websocket, calc_id, data))
            elif msg_type == "cancel":
                calc_id = data.get("id")
                token = ws_manager._cancel_tokens.get(calc_id)
                if token:
                    token.cancel()
            elif msg_type == "ping":
                await websocket.send_json({"type": "pong"})
    except WebSocketDisconnect:
        pass
    except Exception:
        pass
    finally:
        # Cancel only this connection's still-running calculations.
        for calc_id in own_calc_ids:
            token = ws_manager._cancel_tokens.get(calc_id)
            if token:
                token.cancel()

View File

@@ -0,0 +1,6 @@
"""
Core business logic for RFCP.
Existing modules: config.py, database.py
New modules: engine.py, grid.py, calculator.py, result.py
"""

View File

@@ -0,0 +1,103 @@
"""
Point calculator — coordinates per-point propagation calculation.
"""
import math
from typing import Optional
from app.propagation.base import PropagationModel, PropagationInput
from app.propagation.itu_r_p526 import KnifeEdgeDiffractionModel
from app.core.result import PointResult
class PointCalculator:
    """Coordinates the per-grid-point propagation calculation.

    Combines the configured propagation model with antenna-pattern,
    terrain-diffraction and caller-supplied losses into one RSRP figure.
    """

    def __init__(self, model: PropagationModel, environment: str = "urban"):
        self.model = model
        self.environment = environment
        # Knife-edge model applied when terrain clearance goes negative.
        self.diffraction = KnifeEdgeDiffractionModel()

    def calculate_point(
        self,
        site_lat: float, site_lon: float, site_height: float,
        site_power: float, site_gain: float, site_frequency: float,
        point_lat: float, point_lon: float,
        distance: float,
        has_los: bool = True,
        terrain_clearance: Optional[float] = None,
        building_loss: float = 0.0,
        extra_loss: float = 0.0,
        azimuth: Optional[float] = None,
        beamwidth: float = 360,
    ) -> PointResult:
        """Compute the received signal (RSRP, dBm) at one grid point."""
        # Clamp to 1 m so near-field distances don't blow up the models.
        distance = max(distance, 1)
        prop_input = PropagationInput(
            frequency_mhz=site_frequency,
            distance_m=distance,
            tx_height_m=site_height,
            rx_height_m=1.5,
            environment=self.environment,
        )
        # Use the configured model inside its validity envelope; otherwise
        # fall back to plain free-space loss.
        if self.model.is_valid_for(prop_input):
            output = self.model.calculate(prop_input)
        else:
            from app.propagation.free_space import FreeSpaceModel
            output = FreeSpaceModel().calculate(prop_input)
        path_loss = output.path_loss_db

        # Directional antenna: attenuate points off the main lobe.
        antenna_loss = 0.0
        if azimuth is not None and beamwidth < 360:
            antenna_loss = self._antenna_pattern_loss(
                site_lat, site_lon, point_lat, point_lon, azimuth, beamwidth,
            )

        # Negative clearance means terrain cuts the ray: add knife-edge
        # diffraction loss and mark the point non-LOS.
        terrain_loss = 0.0
        if terrain_clearance is not None and terrain_clearance < 0:
            terrain_loss = self.diffraction.calculate_clearance_loss(
                terrain_clearance, site_frequency,
            )
            has_los = False

        rsrp = (
            site_power + site_gain
            - path_loss - antenna_loss
            - terrain_loss - building_loss - extra_loss
        )
        return PointResult(
            lat=point_lat, lon=point_lon, rsrp=rsrp,
            distance=distance, path_loss=path_loss,
            terrain_loss=terrain_loss, building_loss=building_loss,
            diffraction_loss=terrain_loss, has_los=has_los,
            model_used=self.model.name,
        )

    @staticmethod
    def _antenna_pattern_loss(
        site_lat: float, site_lon: float,
        point_lat: float, point_lon: float,
        azimuth: float, beamwidth: float,
    ) -> float:
        """Quadratic main-lobe / side attenuation in dB, capped at 25 dB."""
        # Great-circle initial bearing from the site toward the point.
        phi1, lam1 = math.radians(site_lat), math.radians(site_lon)
        phi2, lam2 = math.radians(point_lat), math.radians(point_lon)
        dlam = lam2 - lam1
        east = math.sin(dlam) * math.cos(phi2)
        north = math.cos(phi1) * math.sin(phi2) - math.sin(phi1) * math.cos(phi2) * math.cos(dlam)
        bearing = (math.degrees(math.atan2(east, north)) + 360) % 360
        # Smallest angular offset between bearing and boresight azimuth.
        off_axis = abs(bearing - azimuth)
        if off_axis > 180:
            off_axis = 360 - off_axis
        half_bw = beamwidth / 2
        if off_axis <= half_bw:
            attenuation = 3 * (off_axis / half_bw) ** 2
        else:
            attenuation = 3 + 12 * ((off_axis - half_bw) / half_bw) ** 2
        return min(attenuation, 25)

240
backend/app/core/engine.py Normal file
View File

@@ -0,0 +1,240 @@
"""
CoverageEngine — main orchestrator for coverage calculations.
Coordinates data loading, model selection, parallel computation,
and result aggregation. Does NOT implement propagation physics
(delegated to models) or handle HTTP (delegated to API layer).
"""
import time
import asyncio
from enum import Enum
from dataclasses import dataclass
from typing import List, Optional, Callable, Awaitable
from app.propagation.base import PropagationModel, PropagationInput
from app.propagation.free_space import FreeSpaceModel
from app.propagation.okumura_hata import OkumuraHataModel
from app.propagation.cost231_hata import Cost231HataModel
from app.propagation.cost231_wi import Cost231WIModel
from app.propagation.itu_r_p1546 import ITUR_P1546Model
from app.propagation.longley_rice import LongleyRiceModel
from app.propagation.itu_r_p526 import KnifeEdgeDiffractionModel
from app.core.result import CoverageResult, PointResult, compute_stats
class BandType(Enum):
    """Frequency-band families used as the first key for model selection."""
    LTE = "lte"  # 700-2600 MHz
    UHF = "uhf"  # 400-520 MHz
    VHF = "vhf"  # 136-174 MHz
    CUSTOM = "custom"  # User-defined
class PresetType(Enum):
    """Calculation quality/speed presets, from fastest to most complete."""
    FAST = "fast"
    STANDARD = "standard"
    DETAILED = "detailed"
    FULL = "full"
@dataclass
class Site:
    """One transmitter site; units noted per field."""
    id: str
    lat: float  # degrees
    lon: float  # degrees
    height: float  # meters AGL
    power: float  # dBm
    gain: float  # dBi
    frequency: float  # MHz
    band_type: BandType = BandType.LTE
    azimuth: Optional[float] = None  # degrees; None = no directional pattern
    beamwidth: float = 65  # degrees
    tilt: float = 0  # degrees — NOTE(review): appears unused in this module
    environment: str = "urban"
@dataclass
class CoverageSettings:
    """New-style settings for a coverage run.

    The legacy toggles below are forwarded 1:1 to the old coverage_service
    settings object in CoverageEngine.calculate().
    """
    radius: float = 10000  # meters
    resolution: float = 200  # meters between grid points
    min_signal: float = -120  # dBm
    preset: PresetType = PresetType.STANDARD
    band_type: BandType = BandType.LTE
    environment: str = "urban"
    terrain_enabled: bool = True
    buildings_enabled: bool = True
    diffraction_enabled: bool = True
    reflection_enabled: bool = False
    # Legacy toggles (backward compat)
    use_terrain: bool = True
    use_buildings: bool = True
    use_materials: bool = True
    use_dominant_path: bool = False
    use_street_canyon: bool = False
    use_reflections: bool = False
    use_water_reflection: bool = False
    use_vegetation: bool = False
    season: str = "summer"
    rain_rate: float = 0.0  # mm/h
    indoor_loss_type: str = "none"
    use_atmospheric: bool = False
    temperature_c: float = 15.0
    humidity_percent: float = 50.0
# Async progress callback signature: (phase, fraction 0..1, eta_seconds | None).
ProgressCallback = Callable[[str, float, Optional[float]], Awaitable[None]]


class CoverageEngine:
    """
    Main orchestrator for coverage calculations.
    Selects the appropriate propagation model based on band type
    and environment, then delegates to the existing coverage pipeline.
    """

    # (band, environment) -> model class. Combinations missing here fall
    # back in select_model(): first to (band, "urban"), then Okumura-Hata.
    _model_registry = {
        (BandType.LTE, "urban"): Cost231HataModel,
        (BandType.LTE, "suburban"): OkumuraHataModel,
        (BandType.LTE, "rural"): OkumuraHataModel,
        (BandType.LTE, "open"): FreeSpaceModel,
        (BandType.UHF, "urban"): OkumuraHataModel,
        (BandType.UHF, "suburban"): OkumuraHataModel,
        (BandType.UHF, "rural"): LongleyRiceModel,
        (BandType.VHF, "urban"): ITUR_P1546Model,
        (BandType.VHF, "suburban"): ITUR_P1546Model,
        (BandType.VHF, "rural"): LongleyRiceModel,
    }

    def __init__(self):
        # One shared model instance per registry entry, reused across runs.
        self._models = {}
        self._init_models()
        self.free_space = FreeSpaceModel()
        self.diffraction = KnifeEdgeDiffractionModel()

    def _init_models(self):
        """Instantiate every registered model class exactly once."""
        for key, model_cls in self._model_registry.items():
            self._models[key] = model_cls()

    def select_model(self, band: BandType, environment: str) -> PropagationModel:
        """Return the model for (band, environment), with graceful fallbacks."""
        key = (band, environment)
        if key in self._models:
            return self._models[key]
        # Unknown environment for a known band: use that band's urban model.
        if (band, "urban") in self._models:
            return self._models[(band, "urban")]
        # Unknown band entirely: Okumura-Hata as a conservative default.
        return OkumuraHataModel()

    def get_available_models(self) -> dict:
        """Describe each distinct model and the band/env combos it serves."""
        models = {}
        seen = set()
        for (band, env), model in self._models.items():
            if model.name not in seen:
                seen.add(model.name)
                models[model.name] = {
                    "frequency_range": model.frequency_range,
                    "distance_range": model.distance_range,
                    "bands": [],
                }
            models[model.name]["bands"].append(f"{band.value}/{env}")
        return models

    async def calculate(
        self,
        sites: List[Site],
        settings: CoverageSettings,
        progress_callback: Optional[ProgressCallback] = None,
    ) -> CoverageResult:
        """
        Main calculation entry point.
        Delegates actual per-point work to the legacy coverage_service
        pipeline, wrapping it with the new clean interface.

        Args:
            sites: transmitter sites; one site uses the single-site path.
            settings: new-style settings, translated to legacy settings below.
            progress_callback: optional async (phase, fraction, eta) callback.
        Returns:
            CoverageResult with per-point results and aggregate stats.
        """
        start_time = time.time()
        model = self.select_model(settings.band_type, settings.environment)
        if progress_callback:
            await progress_callback("init", 0.05, None)
        # Import legacy system
        from app.services.coverage_service import (
            coverage_service, SiteParams,
            CoverageSettings as LegacySettings,
        )
        from app.services.parallel_coverage_service import CancellationToken
        # Map new-style settings onto the legacy settings object 1:1.
        legacy_settings = LegacySettings(
            radius=settings.radius,
            resolution=settings.resolution,
            min_signal=settings.min_signal,
            use_terrain=settings.use_terrain,
            use_buildings=settings.use_buildings,
            use_materials=settings.use_materials,
            use_dominant_path=settings.use_dominant_path,
            use_street_canyon=settings.use_street_canyon,
            use_reflections=settings.use_reflections,
            use_water_reflection=settings.use_water_reflection,
            use_vegetation=settings.use_vegetation,
            season=settings.season,
            rain_rate=settings.rain_rate,
            indoor_loss_type=settings.indoor_loss_type,
            use_atmospheric=settings.use_atmospheric,
            temperature_c=settings.temperature_c,
            humidity_percent=settings.humidity_percent,
            preset=settings.preset.value if isinstance(settings.preset, PresetType) else settings.preset,
        )
        cancel_token = CancellationToken()
        if progress_callback:
            await progress_callback("calculating", 0.25, None)
        legacy_sites = [
            SiteParams(
                lat=s.lat, lon=s.lon, height=s.height,
                power=s.power, gain=s.gain, frequency=s.frequency,
                azimuth=s.azimuth, beamwidth=s.beamwidth,
            )
            for s in sites
        ]
        if len(legacy_sites) == 1:
            points = await coverage_service.calculate_coverage(
                legacy_sites[0], legacy_settings, cancel_token,
            )
        else:
            points = await coverage_service.calculate_multi_site_coverage(
                legacy_sites, legacy_settings, cancel_token,
            )
        if progress_callback:
            await progress_callback("done", 1.0, None)
        # Re-wrap legacy points in the new PointResult type.
        # NOTE(review): path_loss/diffraction_loss are zeroed here — the
        # legacy point objects apparently don't expose them; confirm before
        # relying on these fields downstream.
        result_points = [
            PointResult(
                lat=p.lat, lon=p.lon, rsrp=p.rsrp,
                distance=p.distance, path_loss=0.0,
                terrain_loss=p.terrain_loss,
                building_loss=p.building_loss,
                diffraction_loss=0.0,
                has_los=p.has_los,
                model_used=model.name,
            )
            for p in points
        ]
        computation_time = time.time() - start_time
        return CoverageResult(
            points=result_points,
            stats=compute_stats(result_points),
            computation_time=computation_time,
            models_used=[model.name],
        )


# Singleton
engine = CoverageEngine()

83
backend/app/core/grid.py Normal file
View File

@@ -0,0 +1,83 @@
"""
Grid generation for coverage calculations.
"""
import numpy as np
from dataclasses import dataclass
from typing import List, Tuple
from app.geometry.haversine import haversine_distance
@dataclass
class BoundingBox:
    """Geographic bounds of a grid, in degrees."""
    min_lat: float
    min_lon: float
    max_lat: float
    max_lon: float
@dataclass
class Grid:
    """A set of (lat, lon) sample points plus the metadata used to build it."""
    points: List[Tuple[float, float]]
    bounding_box: BoundingBox
    resolution: float  # meters between neighboring points
    radius: float  # meters from the grid center
class GridService:
    """Generate coverage grid points."""

    @staticmethod
    def generate(
        center_lat: float,
        center_lon: float,
        radius: float,
        resolution: float,
    ) -> Grid:
        """Build a circular grid of (lat, lon) points around a center.

        Points lie on a regular lat/lon lattice spaced `resolution` meters
        apart and are kept only if within `radius` meters of the center.
        """
        # Degrees-per-meter conversions; longitude shrinks with latitude.
        cos_center = np.cos(np.radians(center_lat))
        lat_step = resolution / 111000
        lon_step = resolution / (111000 * cos_center)
        lat_delta = radius / 111000
        lon_delta = radius / (111000 * cos_center)

        bbox = BoundingBox(
            min_lat=center_lat - lat_delta,
            min_lon=center_lon - lon_delta,
            max_lat=center_lat + lat_delta,
            max_lon=center_lon + lon_delta,
        )

        kept = []
        lat = center_lat - lat_delta
        while lat <= center_lat + lat_delta:
            lon = center_lon - lon_delta
            while lon <= center_lon + lon_delta:
                # Circular clip: lattice corner points outside the radius drop out.
                if haversine_distance(center_lat, center_lon, lat, lon) <= radius:
                    kept.append((lat, lon))
                lon += lon_step
            lat += lat_step
        return Grid(points=kept, bounding_box=bbox, resolution=resolution, radius=radius)

    @staticmethod
    def generate_multi_site(sites: list, radius: float, resolution: float) -> Grid:
        """Union of per-site circular grids, deduplicated at ~1 cm precision."""
        merged: set = set()
        lat_lo = lon_lo = float("inf")
        lat_hi = lon_hi = float("-inf")
        for site in sites:
            sub = GridService.generate(site.lat, site.lon, radius, resolution)
            # Rounding to 7 decimals (~1 cm) collapses overlapping points.
            for lat, lon in sub.points:
                merged.add((round(lat, 7), round(lon, 7)))
            bb = sub.bounding_box
            lat_lo = min(lat_lo, bb.min_lat)
            lon_lo = min(lon_lo, bb.min_lon)
            lat_hi = max(lat_hi, bb.max_lat)
            lon_hi = max(lon_hi, bb.max_lon)
        return Grid(
            points=list(merged),
            bounding_box=BoundingBox(lat_lo, lon_lo, lat_hi, lon_hi),
            resolution=resolution,
            radius=radius,
        )

View File

@@ -0,0 +1,65 @@
"""
Coverage result aggregation and statistics.
"""
from dataclasses import dataclass
from typing import List
@dataclass
class PointResult:
    """Per-grid-point calculation outcome (losses in dB, rsrp in dBm)."""
    lat: float
    lon: float
    rsrp: float
    distance: float
    path_loss: float
    terrain_loss: float
    building_loss: float
    diffraction_loss: float
    has_los: bool
    model_used: str

    def to_dict(self) -> dict:
        """Serialize to a JSON-friendly dict with a fixed key order."""
        return dict(
            lat=self.lat,
            lon=self.lon,
            rsrp=self.rsrp,
            distance=self.distance,
            path_loss=self.path_loss,
            terrain_loss=self.terrain_loss,
            building_loss=self.building_loss,
            diffraction_loss=self.diffraction_loss,
            has_los=self.has_los,
            model_used=self.model_used,
        )


@dataclass
class CoverageResult:
    """Aggregated result of one coverage calculation."""
    points: List[PointResult]
    stats: dict
    computation_time: float  # seconds, wall clock
    models_used: List[str]

    def to_dict(self) -> dict:
        """Serialize to the wire format used by the API layer."""
        return {
            "points": [p.to_dict() for p in self.points],
            "count": len(self.points),
            "stats": self.stats,
            "computation_time": round(self.computation_time, 2),
            "models_used": self.models_used,
        }


def compute_stats(points: List[PointResult]) -> dict:
    """Aggregate statistics over a list of point results.

    Always returns the same key set — including the points_with_* counters
    for empty input — so consumers can rely on a fixed schema.
    """
    if not points:
        return {
            "min_rsrp": 0, "max_rsrp": 0, "avg_rsrp": 0,
            "los_percentage": 0, "total_points": 0,
            "points_with_buildings": 0,
            "points_with_terrain_loss": 0,
        }
    rsrp_values = [p.rsrp for p in points]
    los_count = sum(1 for p in points if p.has_los)
    return {
        "min_rsrp": min(rsrp_values),
        "max_rsrp": max(rsrp_values),
        "avg_rsrp": sum(rsrp_values) / len(rsrp_values),
        "los_percentage": los_count / len(points) * 100,
        "total_points": len(points),
        "points_with_buildings": sum(1 for p in points if p.building_loss > 0),
        "points_with_terrain_loss": sum(1 for p in points if p.terrain_loss > 0),
    }

View File

@@ -0,0 +1,38 @@
"""
Geometry operations for RF propagation calculations.
NumPy-dependent modules (haversine, intersection, reflection) are
imported lazily so pure-Python modules (diffraction, los) remain
available even when NumPy is not installed.
"""
from app.geometry.diffraction import knife_edge_loss
from app.geometry.los import check_los_terrain, fresnel_radius
def __getattr__(name):
"""Lazy import for NumPy-dependent geometry functions."""
_numpy_exports = {
"haversine_distance", "haversine_batch", "points_to_local_coords",
"line_segments_intersect_batch", "line_intersects_polygons_batch",
"calculate_reflection_points_batch", "find_best_reflection_path",
}
if name in _numpy_exports:
if name in ("haversine_distance", "haversine_batch", "points_to_local_coords"):
from app.geometry.haversine import haversine_distance, haversine_batch, points_to_local_coords
return locals()[name]
elif name in ("line_segments_intersect_batch", "line_intersects_polygons_batch"):
from app.geometry.intersection import line_segments_intersect_batch, line_intersects_polygons_batch
return locals()[name]
elif name in ("calculate_reflection_points_batch", "find_best_reflection_path"):
from app.geometry.reflection import calculate_reflection_points_batch, find_best_reflection_path
return locals()[name]
raise AttributeError(f"module 'app.geometry' has no attribute {name!r}")
__all__ = [
"haversine_distance", "haversine_batch", "points_to_local_coords",
"line_segments_intersect_batch", "line_intersects_polygons_batch",
"calculate_reflection_points_batch", "find_best_reflection_path",
"knife_edge_loss", "check_los_terrain", "fresnel_radius",
]

View File

@@ -0,0 +1,40 @@
"""
Knife-edge diffraction geometry calculations.
"""
import math
def knife_edge_loss(
    d1_m: float,
    d2_m: float,
    h_m: float,
    wavelength_m: float,
) -> float:
    """
    Single knife-edge diffraction loss via a piecewise Fresnel-parameter fit.

    Args:
        d1_m: Distance TX to obstacle (m).
        d2_m: Distance obstacle to RX (m).
        h_m: Obstacle height above the LOS line (m, positive = above).
        wavelength_m: Signal wavelength (m).
    Returns:
        Loss in dB (>= 0); 0 for degenerate geometry or deep clearance.
    """
    # Degenerate geometry -> treat as no obstruction.
    if min(d1_m, d2_m, wavelength_m) <= 0:
        return 0.0
    # Fresnel-Kirchhoff diffraction parameter.
    v = h_m * math.sqrt(2 * (d1_m + d2_m) / (wavelength_m * d1_m * d2_m))
    if v < -0.78:
        # Well below the first Fresnel zone edge: negligible loss.
        return 0.0
    if v < 0:
        loss_db = 6.02 + 9.11 * v - 1.27 * v ** 2
    elif v < 2.4:
        loss_db = 6.02 + 9.11 * v + 1.65 * v ** 2
    else:
        loss_db = 12.95 + 20 * math.log10(v)
    return max(0.0, loss_db)

View File

@@ -0,0 +1,50 @@
"""
Distance calculations using the haversine formula.
Supports both scalar and batch (NumPy array) operations.
"""
import numpy as np
from typing import Tuple
EARTH_RADIUS = 6371000  # mean Earth radius, meters


def haversine_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    """Great-circle distance between two lat/lon points, in meters."""
    phi1, lam1, phi2, lam2 = (np.radians(v) for v in (lat1, lon1, lat2, lon2))
    # Haversine of the central angle.
    h = np.sin((phi2 - phi1) / 2) ** 2 + np.cos(phi1) * np.cos(phi2) * np.sin((lam2 - lam1) / 2) ** 2
    central_angle = 2 * np.arcsin(np.sqrt(h))
    return float(EARTH_RADIUS * central_angle)


def haversine_batch(
    lat1: float, lon1: float,
    lats2: np.ndarray, lons2: np.ndarray,
) -> np.ndarray:
    """Distances from one reference point to many points (meters)."""
    phi1 = np.radians(lat1)
    lam1 = np.radians(lon1)
    phis = np.radians(lats2)
    lams = np.radians(lons2)
    h = np.sin((phis - phi1) / 2) ** 2 + np.cos(phi1) * np.cos(phis) * np.sin((lams - lam1) / 2) ** 2
    return EARTH_RADIUS * (2 * np.arcsin(np.sqrt(h)))


def points_to_local_coords(
    ref_lat: float, ref_lon: float,
    lats: np.ndarray, lons: np.ndarray,
) -> Tuple[np.ndarray, np.ndarray]:
    """Convert lat/lon to local X/Y meters (equirectangular projection).

    Uses fixed meters-per-degree scale factors; adequate for the small
    (tens of km) extents this project works with.
    """
    cos_ref = np.cos(np.radians(ref_lat))
    east = (lons - ref_lon) * 111320.0 * cos_ref
    north = (lats - ref_lat) * 110540.0
    return east, north

View File

@@ -0,0 +1,116 @@
"""
Vectorized line-segment and line-polygon intersection checks.
All operations use NumPy for batch processing.
"""
import numpy as np
from typing import Tuple
def line_segments_intersect_batch(
    p1: np.ndarray, p2: np.ndarray,
    segments_start: np.ndarray, segments_end: np.ndarray,
) -> Tuple[np.ndarray, np.ndarray]:
    """Check if line p1->p2 intersects with N segments.

    Standard parametric test: solve p1 + t*d == start + u*s with
    t = ((start - p1) x s) / (d x s) and u = ((start - p1) x d) / (d x s),
    intersecting when both t and u lie in [0, 1].

    Args:
        p1, p2: shape (2,)
        segments_start, segments_end: shape (N, 2)
    Returns:
        intersects: bool array (N,)
        t_values: parameter along p1->p2 (N,)
    """
    d = p2 - p1
    seg_d = segments_end - segments_start
    # 2-D cross product d x s; (near-)zero means the lines are parallel.
    cross = d[0] * seg_d[:, 1] - d[1] * seg_d[:, 0]
    parallel_mask = np.abs(cross) < 1e-10
    cross_safe = np.where(parallel_mask, 1.0, cross)
    # BUG FIX: the offset must be (start - p1), not (p1 - start). The old
    # sign negated both t and u, so genuine crossings (t, u in [0, 1]) were
    # reported as misses and only "behind" geometry could ever match.
    dp = segments_start - p1
    t = (dp[:, 0] * seg_d[:, 1] - dp[:, 1] * seg_d[:, 0]) / cross_safe
    u = (dp[:, 0] * d[1] - dp[:, 1] * d[0]) / cross_safe
    intersects = ~parallel_mask & (t >= 0) & (t <= 1) & (u >= 0) & (u <= 1)
    return intersects, t
def line_intersects_polygons_batch(
    p1: np.ndarray, p2: np.ndarray,
    polygons_x: np.ndarray, polygons_y: np.ndarray,
    polygon_lengths: np.ndarray,
    max_polygons: int = 30,
) -> Tuple[np.ndarray, np.ndarray]:
    """Check if line p1->p2 intersects multiple polygons.
    Uses bounding-box pre-filter to limit work when polygon count is large.
    Args:
        p1, p2: shape (2,)
        polygons_x, polygons_y: flattened vertex arrays
        polygon_lengths: vertices per polygon (num_polygons,)
        max_polygons: only check nearest N polygons
    Returns:
        intersects: bool (num_polygons,)
        min_distances: distance to first hit (num_polygons,)
    """
    num_polygons = len(polygon_lengths)
    if num_polygons == 0:
        return np.array([], dtype=bool), np.array([])
    intersects = np.zeros(num_polygons, dtype=bool)
    # Parametric position of each polygon's earliest hit; inf = no hit.
    min_t = np.full(num_polygons, np.inf)
    # Pre-filter: bounding box check
    if num_polygons > max_polygons:
        buf = 50.0  # meters of slack around the line's bounding box
        line_min_x = min(p1[0], p2[0]) - buf
        line_max_x = max(p1[0], p2[0]) + buf
        line_min_y = min(p1[1], p2[1]) - buf
        line_max_y = max(p1[1], p2[1]) + buf
        nearby_mask = np.zeros(num_polygons, dtype=bool)
        vi = 0
        for i, length in enumerate(polygon_lengths):
            if length >= 3:
                # NOTE(review): only the polygon's FIRST vertex is tested
                # against the window, so a large polygon whose first vertex
                # falls outside is skipped entirely — confirm this cheap
                # approximation is intentional.
                cx = polygons_x[vi]
                cy = polygons_y[vi]
                if line_min_x <= cx <= line_max_x and line_min_y <= cy <= line_max_y:
                    nearby_mask[i] = True
            vi += length
        nearby_indices = np.where(nearby_mask)[0]
        if len(nearby_indices) > max_polygons:
            # NOTE(review): keeps the first max_polygons candidates in array
            # order, not distance order, despite the docstring's "nearest N".
            nearby_mask = np.zeros(num_polygons, dtype=bool)
            nearby_mask[nearby_indices[:max_polygons]] = True
    else:
        nearby_mask = np.ones(num_polygons, dtype=bool)
    idx = 0
    for i, length in enumerate(polygon_lengths):
        # Skip degenerate (<3 vertex) or pre-filtered polygons, but still
        # advance the flattened-vertex cursor.
        if length < 3 or not nearby_mask[i]:
            idx += length
            continue
        px = polygons_x[idx:idx + length]
        py = polygons_y[idx:idx + length]
        # Closed edge list: np.roll wraps the last vertex back to the first.
        starts = np.stack([px, py], axis=1)
        ends = np.stack([np.roll(px, -1), np.roll(py, -1)], axis=1)
        edge_intersects, t_vals = line_segments_intersect_batch(p1, p2, starts, ends)
        if np.any(edge_intersects):
            intersects[i] = True
            min_t[i] = np.min(t_vals[edge_intersects])
        idx += length
    # Convert the parametric first-hit t into meters along p1->p2.
    line_length = np.linalg.norm(p2 - p1)
    min_distances = min_t * line_length
    return intersects, min_distances

View File

@@ -0,0 +1,85 @@
"""
Line-of-sight checks using terrain profile data.
"""
import math
from typing import Optional, Dict, List
EARTH_RADIUS = 6371000  # meters
K_FACTOR = 4 / 3  # Standard atmospheric refraction


def check_los_terrain(
    profile: List[dict],
    tx_height: float,
    rx_height: float,
) -> dict:
    """
    Determine line-of-sight along a terrain elevation profile.

    Args:
        profile: samples with 'elevation' and 'distance' keys; TX sample
            first, RX sample last.
        tx_height: TX antenna height above ground (meters).
        rx_height: RX height above ground (meters).
    Returns:
        dict with has_los, clearance (worst-case along the path, meters)
        and blocked_at (distance of the last obstructed sample, or None).
    """
    if not profile:
        # No terrain data: optimistically assume a clear path.
        return {"has_los": True, "clearance": 0.0, "blocked_at": None}

    antenna_tx = profile[0]["elevation"] + tx_height
    antenna_rx = profile[-1]["elevation"] + rx_height
    span = profile[-1]["distance"]
    effective_radius = K_FACTOR * EARTH_RADIUS

    worst_clearance = float("inf")
    obstruction_at = None
    for sample in profile:
        d = sample["distance"]
        # Straight-line ray height here (degenerate span -> TX height).
        if span == 0:
            ray = antenna_tx
        else:
            ray = antenna_tx + (antenna_rx - antenna_tx) * (d / span)
        # Effective-Earth curvature bulge (k = 4/3 refraction).
        bulge = (d * (span - d)) / (2 * effective_radius)
        gap = (ray - bulge) - sample["elevation"]
        if gap < worst_clearance:
            worst_clearance = gap
        if gap <= 0:
            obstruction_at = d
    return {
        "has_los": worst_clearance > 0,
        "clearance": worst_clearance,
        "blocked_at": obstruction_at,
    }
def fresnel_radius(
    d1_m: float, d2_m: float, wavelength_m: float, zone: int = 1
) -> float:
    """Fresnel zone radius at a point along the TX->RX path.

    Args:
        d1_m: Distance from TX to the point (m).
        d2_m: Distance from the point to RX (m).
        wavelength_m: Signal wavelength (m).
        zone: Fresnel zone number (default 1).
    Returns:
        Radius in meters (0 for a degenerate zero-length path).
    """
    path = d1_m + d2_m
    if path <= 0:
        return 0.0
    radius_sq = zone * wavelength_m * d1_m * d2_m / path
    return math.sqrt(radius_sq)

View File

@@ -0,0 +1,163 @@
"""
Vectorized reflection point calculations using mirror-image method.
"""
import numpy as np
from typing import Tuple, Optional
from app.geometry.intersection import line_intersects_polygons_batch
def calculate_reflection_points_batch(
    tx: np.ndarray, rx: np.ndarray,
    wall_starts: np.ndarray, wall_ends: np.ndarray,
) -> Tuple[np.ndarray, np.ndarray]:
    """Mirror-image reflection points on N candidate walls at once.

    TX is mirrored across each wall's supporting line; the specular
    reflection point is where the RX-to-mirror segment crosses the wall.

    Args:
        tx, rx: shape (2,)
        wall_starts, wall_ends: shape (N, 2)

    Returns:
        reflection_points: (N, 2)
        valid: bool (N,) — True where the crossing lies within the wall
            segment and TX sits on the wall's positive-normal side.
    """
    seg = wall_ends - wall_starts
    seg_len = np.linalg.norm(seg, axis=1, keepdims=True)
    seg_dir = seg / np.maximum(seg_len, 1e-10)
    # Left-hand normal of each wall direction.
    normals = np.stack([-seg_dir[:, 1], seg_dir[:, 0]], axis=1)

    # Signed distance of TX from each wall line, then its mirror image.
    signed_dist = np.sum((tx - wall_starts) * normals, axis=1, keepdims=True)
    tx_mirror = tx - 2.0 * signed_dist * normals

    # Intersect the RX-to-mirror segment with each (infinite) wall line.
    to_mirror = tx_mirror - rx
    denom = to_mirror[:, 0] * seg[:, 1] - to_mirror[:, 1] * seg[:, 0]
    nondegenerate = np.abs(denom) > 1e-10
    safe_denom = np.where(nondegenerate, denom, 1.0)
    to_start = wall_starts - rx
    t = (to_start[:, 0] * to_mirror[:, 1] -
         to_start[:, 1] * to_mirror[:, 0]) / safe_denom
    reflection_points = wall_starts + t[:, np.newaxis] * seg

    valid = nondegenerate & (t >= 0) & (t <= 1) & (signed_dist[:, 0] > 0)
    return reflection_points, valid
def find_best_reflection_path(
    tx: np.ndarray, rx: np.ndarray,
    building_walls_start: np.ndarray,
    building_walls_end: np.ndarray,
    wall_to_building: np.ndarray,
    obstacle_polygons_x: np.ndarray,
    obstacle_polygons_y: np.ndarray,
    obstacle_lengths: np.ndarray,
    max_candidates: int = 50,
    max_walls: int = 100,
    max_los_checks: int = 10,
) -> Tuple[Optional[np.ndarray], float, float]:
    """Find best single-reflection path using vectorized ops.

    Pipeline: (1) keep the max_walls walls nearest the TX-RX midpoint,
    (2) compute mirror-image reflection points for all of them in one
    batch, (3) discard candidates whose reflected path exceeds 2x the
    direct distance, (4) keep the max_candidates shortest and sort them,
    (5) verify obstacle-free visibility for at most max_los_checks of
    the shortest candidates and return the first clear one (which, by
    the sort, is the shortest).

    Args:
        tx, rx: (2,) endpoint coordinates.
        building_walls_start, building_walls_end: (N, 2) wall segments.
        wall_to_building: (N,) per-wall building index; carried through
            the distance filter but not otherwise used here.
        obstacle_polygons_x, obstacle_polygons_y, obstacle_lengths:
            packed polygon arrays consumed by line_intersects_polygons_batch.
        max_candidates: Cap on candidates kept after the distance filter.
        max_walls: Only consider closest N walls for reflection candidates.
        max_los_checks: Only verify LOS for top N shortest reflection paths.

    Returns:
        best_reflection_point: (2,) or None
        best_path_length: meters (np.inf when no valid path was found)
        best_reflection_loss: dB (0.0 when no valid path was found)
    """
    num_walls = len(building_walls_start)
    if num_walls == 0:
        return None, np.inf, 0.0
    # Limit walls by distance to path midpoint
    if num_walls > max_walls:
        midpoint = (tx + rx) / 2
        wall_midpoints = (building_walls_start + building_walls_end) / 2
        wall_distances = np.linalg.norm(wall_midpoints - midpoint, axis=1)
        # argpartition: O(N) selection of the max_walls nearest walls.
        closest = np.argpartition(wall_distances, max_walls)[:max_walls]
        building_walls_start = building_walls_start[closest]
        building_walls_end = building_walls_end[closest]
        wall_to_building = wall_to_building[closest]
    refl_points, valid = calculate_reflection_points_batch(
        tx, rx, building_walls_start, building_walls_end,
    )
    if not np.any(valid):
        return None, np.inf, 0.0
    valid_indices = np.where(valid)[0]
    valid_refl = refl_points[valid]
    tx_to_refl = np.linalg.norm(valid_refl - tx, axis=1)
    refl_to_rx = np.linalg.norm(rx - valid_refl, axis=1)
    path_lengths = tx_to_refl + refl_to_rx
    # Direct distance filter: reflected paths longer than 2x the direct
    # distance are too attenuated to matter.
    direct_dist = np.linalg.norm(rx - tx)
    within_range = path_lengths <= direct_dist * 2.0
    if not np.any(within_range):
        return None, np.inf, 0.0
    valid_indices = valid_indices[within_range]
    valid_refl = valid_refl[within_range]
    path_lengths = path_lengths[within_range]
    # Keep top candidates by shortest path
    if len(valid_indices) > max_candidates:
        top_idx = np.argpartition(path_lengths, max_candidates)[:max_candidates]
        valid_indices = valid_indices[top_idx]
        valid_refl = valid_refl[top_idx]
        path_lengths = path_lengths[top_idx]
    # Sort by path length for early exit
    sort_order = np.argsort(path_lengths)
    valid_refl = valid_refl[sort_order]
    path_lengths = path_lengths[sort_order]
    # Check LOS only for top N shortest candidates; the expensive part is
    # the two polygon-intersection batches per candidate.
    check_count = min(len(valid_refl), max_los_checks)
    best_idx = -1
    best_length = np.inf
    for i in range(check_count):
        length = path_lengths[i]
        # NOTE(review): vestigial guard — the loop breaks on the first
        # valid candidate, so best_length is still inf here.
        if length >= best_length:
            continue
        refl_pt = valid_refl[i]
        # Both legs (TX->reflection, reflection->RX) must be obstacle-free.
        intersects1, _ = line_intersects_polygons_batch(
            tx, refl_pt, obstacle_polygons_x, obstacle_polygons_y, obstacle_lengths,
        )
        if np.any(intersects1):
            continue
        intersects2, _ = line_intersects_polygons_batch(
            refl_pt, rx, obstacle_polygons_x, obstacle_polygons_y, obstacle_lengths,
        )
        if np.any(intersects2):
            continue
        best_idx = i
        best_length = length
        break  # sorted by length, first valid is best
    if best_idx < 0:
        return None, np.inf, 0.0
    best_point = valid_refl[best_idx]
    # Reflection loss: 3-10 dB depending on path ratio
    path_ratio = best_length / max(direct_dist, 1.0)
    reflection_loss = 3.0 + 7.0 * min(1.0, (path_ratio - 1.0) * 2)
    return best_point, best_length, reflection_loss

View File

@@ -1,10 +1,11 @@
from contextlib import asynccontextmanager
from fastapi import FastAPI
from fastapi import FastAPI, WebSocket
from fastapi.middleware.cors import CORSMiddleware
from app.core.database import connect_to_mongo, close_mongo_connection
from app.api.routes import health, projects, terrain, coverage, regions, system
from app.api.websocket import websocket_endpoint
@asynccontextmanager
@@ -17,7 +18,7 @@ async def lifespan(app: FastAPI):
app = FastAPI(
title="RFCP Backend API",
description="RF Coverage Planning Backend",
version="1.6.0",
version="3.0.0",
lifespan=lifespan,
)
@@ -30,7 +31,7 @@ app.add_middleware(
allow_headers=["*"],
)
# Routes
# REST routes
app.include_router(health.router, prefix="/api/health", tags=["health"])
app.include_router(projects.router, prefix="/api/projects", tags=["projects"])
app.include_router(terrain.router, prefix="/api/terrain", tags=["terrain"])
@@ -38,10 +39,13 @@ app.include_router(coverage.router, prefix="/api/coverage", tags=["coverage"])
app.include_router(regions.router, prefix="/api/regions", tags=["regions"])
app.include_router(system.router, prefix="/api/system", tags=["system"])
# WebSocket endpoint for real-time coverage with progress
app.websocket("/ws")(websocket_endpoint)
@app.get("/")
async def root():
return {"message": "RFCP Backend API", "version": "1.5.1"}
return {"message": "RFCP Backend API", "version": "3.0.0"}
if __name__ == "__main__":

View File

@@ -0,0 +1,11 @@
"""
Parallel processing infrastructure for coverage calculations.
"""
from app.parallel.manager import SharedMemoryManager, SharedTerrainData, SharedBuildingData
from app.parallel.pool import ManagedProcessPool
__all__ = [
"SharedMemoryManager", "SharedTerrainData", "SharedBuildingData",
"ManagedProcessPool",
]

View File

@@ -0,0 +1,174 @@
"""
Shared Memory Manager for parallel processing.
Instead of copying building/terrain data to each worker,
store data in shared memory that all workers can read.
"""
import multiprocessing.shared_memory as shm
import numpy as np
from dataclasses import dataclass
from typing import List, Optional
@dataclass
class SharedTerrainData:
    """Reference to terrain data in shared memory.

    A small picklable handle: workers receive this instead of the grid
    itself and attach to the block with get_array() (zero-copy).
    """
    shm_name: str      # name of the shared-memory block holding the grid
    shape: tuple       # numpy shape of the elevation grid
    bounds: tuple      # (min_lat, min_lon, max_lat, max_lon)
    resolution: float  # grid resolution (units defined by the producer)
    # Element dtype of the stored grid; defaults to int16 (SRTM heights).
    # NOTE(review): SharedMemoryManager.store_terrain preserves the source
    # dtype — producers storing non-int16 grids should set this field.
    dtype: str = "int16"

    def get_array(self) -> np.ndarray:
        """Attach to the shared block and return a zero-copy ndarray view.

        The SharedMemory handle is retained on this instance: if it were
        left to garbage collection, CPython would close the mapping and
        invalidate the returned array's buffer.
        """
        existing_shm = shm.SharedMemory(name=self.shm_name)
        # Keep every opened handle alive for the lifetime of this reference.
        if not hasattr(self, "_attached"):
            self._attached = []
        self._attached.append(existing_shm)
        return np.ndarray(self.shape, dtype=np.dtype(self.dtype), buffer=existing_shm.buf)
@dataclass
class SharedBuildingData:
    """Reference to building data in shared memory.

    Buildings are packed as flat arrays: per-building centroids and
    heights, plus one packed vertex array indexed by an offsets table
    (offsets[i]:offsets[i+1] is building i's polygon slice).
    """
    shm_centroids_name: str  # (N, 2) float64
    shm_heights_name: str    # (N,) float32
    shm_vertices_name: str   # (total_verts, 2) float64
    shm_offsets_name: str    # (N+1,) int32
    count: int               # number of buildings N
    total_vertices: int      # total rows in the packed vertex array

    def _attach(self, name: str):
        """Open a shared block and keep the handle alive on this instance.

        Without a retained handle, CPython would GC-close the mapping and
        invalidate any ndarray views previously returned to callers.
        """
        block = shm.SharedMemory(name=name)
        if not hasattr(self, "_attached"):
            self._attached = []
        self._attached.append(block)
        return block

    def get_centroids(self) -> np.ndarray:
        """(count, 2) float64 centroids, stored (lat, lon) by the producer."""
        block = self._attach(self.shm_centroids_name)
        return np.ndarray((self.count, 2), dtype=np.float64, buffer=block.buf)

    def get_heights(self) -> np.ndarray:
        """(count,) float32 building heights."""
        block = self._attach(self.shm_heights_name)
        return np.ndarray((self.count,), dtype=np.float32, buffer=block.buf)

    def get_offsets(self) -> np.ndarray:
        """(count + 1,) int32 offsets; building i spans offsets[i]:offsets[i+1]."""
        block = self._attach(self.shm_offsets_name)
        return np.ndarray((self.count + 1,), dtype=np.int32, buffer=block.buf)

    def get_vertices(self) -> np.ndarray:
        """(total_vertices, 2) float64 polygon vertices for all buildings."""
        block = self._attach(self.shm_vertices_name)
        return np.ndarray((self.total_vertices, 2), dtype=np.float64, buffer=block.buf)

    def get_polygon(self, idx: int) -> np.ndarray:
        """Polygon vertex slice for building idx (view into shared memory)."""
        offsets = self.get_offsets()
        vertices = self.get_vertices()
        start, end = offsets[idx], offsets[idx + 1]
        return vertices[start:end]
class SharedMemoryManager:
    """
    Manages shared memory blocks for parallel processing.

    The manager owns every block it creates and is the only place they
    are released; the Shared*Data objects it returns are lightweight,
    picklable references that workers attach to.

    Usage:
        manager = SharedMemoryManager()
        terrain_ref = manager.store_terrain(heights, bounds, resolution)
        buildings_ref = manager.store_buildings(buildings)
        # Pass references (small dataclasses) to workers
        pool.map(worker_func, points, terrain_ref, buildings_ref)
        # Workers attach to shared memory — no copy!
        terrain = terrain_ref.get_array()
        # Cleanup when done
        manager.cleanup()
    """
    def __init__(self):
        # All SharedMemory blocks created by this manager; released in cleanup().
        self._shm_blocks: list = []
    def store_terrain(
        self, heights: np.ndarray, bounds: tuple, resolution: float,
    ) -> SharedTerrainData:
        """Store terrain heights in shared memory.

        Args:
            heights: Elevation grid. NOTE(review): the block is written
                with the source dtype, while SharedTerrainData.get_array
                reads it as int16 — confirm callers always pass int16.
            bounds: (min_lat, min_lon, max_lat, max_lon) of the grid.
            resolution: Grid resolution, forwarded to the reference as-is.

        Returns:
            Picklable SharedTerrainData reference for worker processes.
        """
        shm_block = shm.SharedMemory(create=True, size=heights.nbytes)
        self._shm_blocks.append(shm_block)
        # Copy the grid into the shared block through a zero-copy view.
        shm_array = np.ndarray(heights.shape, dtype=heights.dtype, buffer=shm_block.buf)
        shm_array[:] = heights[:]
        return SharedTerrainData(
            shm_name=shm_block.name,
            shape=heights.shape,
            bounds=bounds,
            resolution=resolution,
        )
    def store_buildings(self, buildings: list) -> Optional[SharedBuildingData]:
        """Store building data in shared memory.

        Buildings are flattened into four arrays: centroids (N, 2),
        heights (N,), one packed vertex array and an offsets table that
        maps building i to its vertex slice.

        Args:
            buildings: List of Building objects or dicts with geometry.
                Geometry points are (lon, lat); vertices are stored (lat, lon).
        Returns:
            SharedBuildingData reference, or None if no buildings.
        """
        n = len(buildings)
        if n == 0:
            return None
        # Extract centroids
        centroids = np.zeros((n, 2), dtype=np.float64)
        heights = np.zeros(n, dtype=np.float32)
        all_vertices = []
        offsets = [0]
        for i, b in enumerate(buildings):
            # Support both dict and object forms
            if hasattr(b, 'geometry'):
                geom = b.geometry
                h = getattr(b, 'height', 10.0)
            else:
                geom = b.get('geometry', [])
                h = b.get('height', 10.0)
            if geom:
                # Centroid of the ring, stored (lat, lon).
                lats = [p[1] for p in geom]
                lons = [p[0] for p in geom]
                centroids[i] = [sum(lats) / len(lats), sum(lons) / len(lons)]
                for lon, lat in geom:
                    all_vertices.append([lat, lon])
            # Missing/zero height falls back to 10 m.
            heights[i] = h or 10.0
            offsets.append(len(all_vertices))
        vertices = np.array(all_vertices, dtype=np.float64) if all_vertices else np.zeros((0, 2), dtype=np.float64)
        offsets = np.array(offsets, dtype=np.int32)
        # Create shared memory (size >= 1: zero-byte blocks are invalid).
        shm_centroids = shm.SharedMemory(create=True, size=max(centroids.nbytes, 1))
        shm_heights = shm.SharedMemory(create=True, size=max(heights.nbytes, 1))
        shm_vertices = shm.SharedMemory(create=True, size=max(vertices.nbytes, 1))
        shm_offsets = shm.SharedMemory(create=True, size=max(offsets.nbytes, 1))
        self._shm_blocks.extend([shm_centroids, shm_heights, shm_vertices, shm_offsets])
        # Copy data
        if centroids.nbytes > 0:
            np.ndarray(centroids.shape, dtype=centroids.dtype, buffer=shm_centroids.buf)[:] = centroids
        if heights.nbytes > 0:
            np.ndarray(heights.shape, dtype=heights.dtype, buffer=shm_heights.buf)[:] = heights
        if vertices.nbytes > 0:
            np.ndarray(vertices.shape, dtype=vertices.dtype, buffer=shm_vertices.buf)[:] = vertices
        if offsets.nbytes > 0:
            np.ndarray(offsets.shape, dtype=offsets.dtype, buffer=shm_offsets.buf)[:] = offsets
        return SharedBuildingData(
            shm_centroids_name=shm_centroids.name,
            shm_heights_name=shm_heights.name,
            shm_vertices_name=shm_vertices.name,
            shm_offsets_name=shm_offsets.name,
            count=n,
            total_vertices=len(all_vertices),
        )
    def cleanup(self):
        """Release all shared memory blocks.

        close() detaches this process; unlink() removes the block from
        the OS. Errors are swallowed so cleanup never fails shutdown.
        """
        for block in self._shm_blocks:
            try:
                block.close()
                block.unlink()
            except Exception:
                pass
        self._shm_blocks.clear()

View File

@@ -0,0 +1,136 @@
"""
Managed process pool with automatic cleanup.
"""
import os
import sys
import subprocess
import time
import multiprocessing as mp
from concurrent.futures import ProcessPoolExecutor, as_completed
from typing import List, Dict, Tuple, Optional, Callable
class ManagedProcessPool:
    """
    Process pool wrapper with:
    - Automatic cleanup on exit
    - Worker process kill on failure
    - Progress reporting
    """

    # Hard cap on worker count to bound resource use.
    MAX_WORKERS = 6

    def __init__(self, max_workers: int = 6):
        # Clamp the requested worker count to the hard cap.
        self.max_workers = min(max_workers, self.MAX_WORKERS)
        self._pool: Optional[ProcessPoolExecutor] = None

    def map_chunks(
        self,
        worker_fn: Callable,
        chunks: List[tuple],
        log_fn: Optional[Callable] = None,
    ) -> List[Dict]:
        """
        Submit chunks to the pool and collect results.

        Results are flattened in completion order (not submission order);
        a failed chunk is logged and skipped rather than aborting the batch.

        Args:
            worker_fn: Function to call for each chunk; must be picklable
                for the 'spawn' context and return an iterable of dicts.
            chunks: List of (chunk_data, *args) tuples.
            log_fn: Progress logging function (defaults to stdout print).

        Returns:
            Flattened list of result dicts.
        """
        if log_fn is None:
            log_fn = lambda msg: print(f"[POOL] {msg}", flush=True)
        if not chunks:
            # Nothing to do — avoid spinning up (and tearing down) a pool.
            return []

        all_results: List[Dict] = []
        total = len(chunks)
        try:
            # 'spawn' gives clean worker state on every platform (and is
            # the only start method available on Windows).
            ctx = mp.get_context('spawn')
            self._pool = ProcessPoolExecutor(
                max_workers=self.max_workers, mp_context=ctx,
            )
            futures = [self._pool.submit(worker_fn, chunk) for chunk in chunks]
            completed = 0
            for future in as_completed(futures):
                try:
                    all_results.extend(future.result())
                except Exception as e:
                    log_fn(f"Chunk error: {e}")
                completed += 1
                pct = completed * 100 // total
                log_fn(f"Progress: {completed}/{total} ({pct}%)")
        except Exception as e:
            log_fn(f"Pool error: {e}")
        finally:
            if self._pool:
                self._pool.shutdown(wait=False, cancel_futures=True)
                self._pool = None
                # Grace period so workers can exit before force-kill sweep.
                time.sleep(0.5)
                killed = self._kill_orphans()
                if killed > 0:
                    log_fn(f"Cleaned up {killed} orphaned workers")
        return all_results

    @staticmethod
    def _kill_orphans() -> int:
        """Force-kill orphaned rfcp-server worker processes.

        Best-effort: matches processes by name ('rfcp-server'), never
        targets the current process, and swallows all tooling errors.

        Returns:
            Number of processes killed (0 when none matched or tools missing).
        """
        my_pid = os.getpid()
        killed = 0
        if sys.platform == 'win32':
            try:
                result = subprocess.run(
                    ['tasklist', '/FI', 'IMAGENAME eq rfcp-server.exe', '/FO', 'CSV', '/NH'],
                    capture_output=True, text=True, timeout=5,
                )
                for line in result.stdout.strip().split('\n'):
                    if 'rfcp-server.exe' not in line:
                        continue
                    # CSV row: "name","pid",... — PID is the second field.
                    parts = line.split(',')
                    if len(parts) >= 2:
                        pid_str = parts[1].strip().strip('"')
                        try:
                            pid = int(pid_str)
                            if pid != my_pid:
                                subprocess.run(
                                    ['taskkill', '/F', '/PID', str(pid)],
                                    capture_output=True, timeout=5,
                                )
                                killed += 1
                        except (ValueError, subprocess.TimeoutExpired):
                            pass
            except Exception:
                pass
        else:
            try:
                result = subprocess.run(
                    ['pgrep', '-f', 'rfcp-server'],
                    capture_output=True, text=True, timeout=5,
                )
                for pid_str in result.stdout.strip().split('\n'):
                    if not pid_str:
                        continue
                    try:
                        pid = int(pid_str)
                        if pid != my_pid:
                            os.kill(pid, 9)  # SIGKILL: workers may be hung
                            killed += 1
                    except (ValueError, ProcessLookupError, PermissionError):
                        pass
            except Exception:
                pass
        return killed

View File

@@ -0,0 +1,64 @@
"""
Worker functions for parallel coverage calculation.
These run in separate processes and access shared memory data.
"""
from typing import List, Dict, Optional
from app.parallel.manager import SharedTerrainData, SharedBuildingData
def process_chunk(
    chunk: List[tuple],
    terrain_cache: dict,
    buildings: list,
    osm_data: dict,
    config: dict,
) -> List[dict]:
    """
    Process a chunk of grid points.

    This is the standard worker function used by both Ray and
    ProcessPoolExecutor. It re-uses the existing coverage calculation
    logic.

    Args:
        chunk: List of (lat, lon, point_elevation) tuples to evaluate.
        terrain_cache: Pre-loaded terrain tiles, injected into this
            worker's terrain_service singleton.
        buildings: Building list; used to build a per-worker spatial index.
        osm_data: Dict with optional 'streets', 'water_bodies' and
            'vegetation_areas' lists.
        config: Dict with 'site_dict', 'settings_dict', 'site_elevation'
            and optionally 'precomputed' keyed by (lat, lon) with
            'distance' / 'path_loss' entries.

    Returns:
        List of serialized point dicts (model_dump) whose rsrp meets
        settings.min_signal.
    """
    # Inject terrain cache into the module-level singleton; each spawned
    # worker process has its own copy of the singleton.
    from app.services.terrain_service import terrain_service
    terrain_service._tile_cache = terrain_cache
    # Build spatial index
    from app.services.spatial_index import SpatialIndex
    spatial_idx = SpatialIndex()
    if buildings:
        spatial_idx.build(buildings)
    # Process points using existing calculator
    from app.services.coverage_service import CoverageService, SiteParams, CoverageSettings
    site = SiteParams(**config['site_dict'])
    settings = CoverageSettings(**config['settings_dict'])
    svc = CoverageService()
    # Per-stage timing accumulators, passed through to the point calculator.
    timing = {
        "los": 0.0, "buildings": 0.0, "antenna": 0.0,
        "dominant_path": 0.0, "street_canyon": 0.0,
        "reflection": 0.0, "vegetation": 0.0,
    }
    precomputed = config.get('precomputed')
    results = []
    for lat, lon, point_elev in chunk:
        # Reuse precomputed distance/path-loss for this point when available.
        pre = precomputed.get((lat, lon)) if precomputed else None
        point = svc._calculate_point_sync(
            site, lat, lon, settings,
            buildings, osm_data.get('streets', []),
            spatial_idx, osm_data.get('water_bodies', []),
            osm_data.get('vegetation_areas', []),
            config['site_elevation'], point_elev, timing,
            precomputed_distance=pre.get('distance') if pre else None,
            precomputed_path_loss=pre.get('path_loss') if pre else None,
        )
        # Only keep points at or above the configured signal floor.
        if point.rsrp >= settings.min_signal:
            results.append(point.model_dump())
    return results

View File

@@ -0,0 +1,21 @@
"""
Propagation models for RF coverage calculation.
Each model implements the PropagationModel interface and is stateless/thread-safe.
"""
from app.propagation.base import PropagationModel, PropagationInput, PropagationOutput
from app.propagation.free_space import FreeSpaceModel
from app.propagation.okumura_hata import OkumuraHataModel
from app.propagation.cost231_hata import Cost231HataModel
from app.propagation.cost231_wi import Cost231WIModel
from app.propagation.itu_r_p1546 import ITUR_P1546Model
from app.propagation.itu_r_p526 import KnifeEdgeDiffractionModel
from app.propagation.longley_rice import LongleyRiceModel
__all__ = [
"PropagationModel", "PropagationInput", "PropagationOutput",
"FreeSpaceModel", "OkumuraHataModel", "Cost231HataModel",
"Cost231WIModel", "ITUR_P1546Model", "KnifeEdgeDiffractionModel",
"LongleyRiceModel",
]

View File

@@ -0,0 +1,87 @@
"""
Abstract base class for all propagation models.
Each model implements a single, well-defined propagation algorithm.
Models are stateless and can be called concurrently.
"""
from abc import ABC, abstractmethod
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class PropagationInput:
    """Input for propagation calculation.

    Distances and heights are meters, frequency is MHz. Optional fields
    default to None and are consumed only by models that need them.
    """
    frequency_mhz: float
    distance_m: float
    tx_height_m: float
    rx_height_m: float
    environment: str = "urban"  # urban, suburban, rural, open
    # Optional terrain info
    terrain_clearance_m: Optional[float] = None  # min LOS clearance; negative = blocked
    terrain_roughness_m: Optional[float] = None  # terrain irregularity (delta-h)
    # Optional building info (consumed by COST-231-WI)
    building_height_m: Optional[float] = None  # average rooftop height
    street_width_m: Optional[float] = None  # street width
    building_separation_m: Optional[float] = None  # building row separation
@dataclass
class PropagationOutput:
    """Output from propagation calculation."""
    path_loss_db: float  # total predicted path loss
    model_name: str  # name of the model that produced this result
    is_los: bool  # the model's own LOS/NLOS determination
    breakdown: dict = field(default_factory=dict)  # per-term contributions (dB)
class PropagationModel(ABC):
    """
    Common interface for all path-loss models.

    Implementations are stateless and safe to call from multiple
    threads/processes concurrently.
    """

    @property
    @abstractmethod
    def name(self) -> str:
        """Human-readable model name for logging/display."""
        ...

    @property
    @abstractmethod
    def frequency_range(self) -> tuple:
        """Valid frequency range as (min_mhz, max_mhz)."""
        ...

    @property
    @abstractmethod
    def distance_range(self) -> tuple:
        """Valid distance range as (min_m, max_m)."""
        ...

    @abstractmethod
    def calculate(self, input: PropagationInput) -> PropagationOutput:
        """
        Compute path loss for the given input.

        Implementations MUST be stateless (no side effects), thread-safe
        (callable concurrently) and fast (no I/O, no heavy computation).
        """
        ...

    def is_valid_for(self, input: PropagationInput) -> bool:
        """True when both frequency and distance fall inside this model's
        validity ranges (bounds inclusive)."""
        freq_lo, freq_hi = self.frequency_range
        dist_lo, dist_hi = self.distance_range
        freq_ok = freq_lo <= input.frequency_mhz <= freq_hi
        dist_ok = dist_lo <= input.distance_m <= dist_hi
        return freq_ok and dist_ok

View File

@@ -0,0 +1,62 @@
"""
COST-231 Hata model (extension of Okumura-Hata).
Valid for:
- Frequency: 1500-2000 MHz
- Distance: 1-20 km
Better for LTE bands than original Okumura-Hata.
"""
import math
from app.propagation.base import PropagationModel, PropagationInput, PropagationOutput
class Cost231HataModel(PropagationModel):
    """COST-231 extension of Okumura-Hata for the 1500-2000 MHz bands."""

    @property
    def name(self) -> str:
        return "COST-231-Hata"

    @property
    def frequency_range(self) -> tuple:
        return (1500, 2000)

    @property
    def distance_range(self) -> tuple:
        return (100, 20000)

    def calculate(self, input: PropagationInput) -> PropagationOutput:
        """Compute COST-231-Hata path loss (medium-city mobile correction)."""
        freq = input.frequency_mhz
        dist_km = max(input.distance_m / 1000, 0.1)
        tx_h = max(input.tx_height_m, 1.0)
        rx_h = max(input.rx_height_m, 1.0)

        log_f = math.log10(freq)
        log_hb = math.log10(tx_h)

        # Mobile antenna correction (medium city)
        a_hm = (1.1 * log_f - 0.7) * rx_h - (1.56 * log_f - 0.8)
        # Metropolitan center correction
        C_m = 3 if input.environment == "urban" else 0

        frequency_term = 33.9 * log_f
        height_gain = -13.82 * log_hb
        distance_term = (44.9 - 6.55 * log_hb) * math.log10(dist_km)
        total = 46.3 + frequency_term + height_gain - a_hm + distance_term + C_m

        return PropagationOutput(
            path_loss_db=total,
            model_name=self.name,
            is_los=False,
            breakdown={
                "base_loss": 46.3,
                "frequency_term": frequency_term,
                "height_gain": height_gain,
                "mobile_correction": -a_hm,
                "distance_term": distance_term,
                "metro_correction": C_m,
            },
        )

View File

@@ -0,0 +1,114 @@
"""
COST-231 Walfisch-Ikegami model.
Valid for:
- Frequency: 800-2000 MHz
- Distance: 20m-5km
- Urban microcell environments
Accounts for building heights, street widths, and building separation.
Reference: COST 231 Final Report, Chapter 4.
"""
import math
from app.propagation.base import PropagationModel, PropagationInput, PropagationOutput
class Cost231WIModel(PropagationModel):
    """COST-231 Walfisch-Ikegami urban microcell model (800-2000 MHz, 20 m - 5 km)."""

    @property
    def name(self) -> str:
        return "COST-231-WI"

    @property
    def frequency_range(self) -> tuple:
        return (800, 2000)

    @property
    def distance_range(self) -> tuple:
        return (20, 5000)

    def calculate(self, input: PropagationInput) -> PropagationOutput:
        """Compute COST-231 Walfisch-Ikegami path loss.

        Free-space loss plus rooftop-to-street diffraction (L_rts) and
        multi-screen diffraction (L_msd) in the NLOS case.

        Fix vs. previous revision: when the RX is at or above the mean
        rooftop height (delta_hm <= 0), 20*log10(delta_hm) is undefined
        and used to raise a math domain error; per COST 231 the
        rooftop-to-street term is dropped (L_rts = 0) in that regime.
        """
        f = input.frequency_mhz
        d = max(input.distance_m / 1000, 0.02)  # km
        hb = max(input.tx_height_m, 4.0)
        hm = max(input.rx_height_m, 1.0)

        # Building parameters (defaults for typical urban)
        h_roof = input.building_height_m or 15.0  # avg building height
        w = input.street_width_m or 20.0          # street width
        b = input.building_separation_m or 30.0   # building separation

        delta_hb = hb - h_roof  # TX height above rooftop (may be negative)
        delta_hm = h_roof - hm  # rooftop height above RX (may be <= 0)

        # Free space loss
        L_fs = 32.45 + 20 * math.log10(d) + 20 * math.log10(f)

        # Short-range case with TX above rooftops: free space only.
        if delta_hb > 0 and d < 0.5:
            return PropagationOutput(
                path_loss_db=L_fs,
                model_name=self.name,
                is_los=True,
                breakdown={"free_space": L_fs, "rooftop_diffraction": 0, "multiscreen": 0},
            )

        # Rooftop-to-street diffraction (L_rts). phi is the street
        # orientation angle, fixed at 90 deg (worst case) for now; the
        # branches below cover the full COST-231 orientation function.
        phi = 90.0
        if phi < 35:
            L_ori = -10 + 0.354 * phi
        elif phi < 55:
            L_ori = 2.5 + 0.075 * (phi - 35)
        else:
            L_ori = 4.0 - 0.114 * (phi - 55)
        if delta_hm > 0:
            L_rts = (
                -16.9
                - 10 * math.log10(w)
                + 10 * math.log10(f)
                + 20 * math.log10(delta_hm)
                + L_ori
            )
        else:
            # RX at/above rooftop level: no rooftop-to-street diffraction.
            L_rts = 0.0

        # Multi-screen diffraction (L_msd)
        if delta_hb > 0:
            L_bsh = -18 * math.log10(1 + delta_hb)
            k_a = 54
            k_d = 18
        else:
            L_bsh = 0
            k_a = 54 - 0.8 * abs(delta_hb)
            if d >= 0.5:
                # NOTE(review): for d >= 0.5 the max() always returns the
                # first argument; check intent against COST 231 (the d/0.5
                # scaling applies to d < 0.5 km in the recommendation).
                k_a = max(k_a, 54 - 0.8 * abs(delta_hb) * (d / 0.5))
            k_d = 18 - 15 * abs(delta_hb) / h_roof
        k_f = -4 + 0.7 * (f / 925 - 1)  # medium city
        if input.environment == "urban":
            k_f = -4 + 1.5 * (f / 925 - 1)
        L_msd = (
            L_bsh
            + k_a
            + k_d * math.log10(d)
            + k_f * math.log10(f)
            - 9 * math.log10(b)
        )

        # Total NLOS loss: diffraction terms only add loss, never gain.
        if L_rts + L_msd > 0:
            L = L_fs + L_rts + L_msd
        else:
            L = L_fs

        return PropagationOutput(
            path_loss_db=L,
            model_name=self.name,
            is_los=False,
            breakdown={
                "free_space": L_fs,
                "rooftop_diffraction": max(L_rts, 0),
                "multiscreen": max(L_msd, 0),
            },
        )

View File

@@ -0,0 +1,43 @@
"""
Free Space Path Loss (FSPL) model.
Used as baseline and for LOS conditions.
FSPL = 20*log10(d) + 20*log10(f) + 32.45
where d in km, f in MHz
"""
import math
from app.propagation.base import PropagationModel, PropagationInput, PropagationOutput
class FreeSpaceModel(PropagationModel):
    """Free Space Path Loss — theoretical minimum propagation loss.

    FSPL(dB) = 20*log10(d_km) + 20*log10(f_MHz) + 32.45
    """

    @property
    def name(self) -> str:
        return "Free-Space"

    @property
    def frequency_range(self) -> tuple:
        # Essentially unrestricted: 1 MHz to 100 GHz.
        return (1, 100000)

    @property
    def distance_range(self) -> tuple:
        return (1, 1000000)  # 1m to 1000km

    def calculate(self, input: PropagationInput) -> PropagationOutput:
        """Compute FSPL; distance is clamped to a 1 m minimum."""
        dist_km = max(input.distance_m / 1000, 0.001)
        distance_loss = 20 * math.log10(dist_km)
        frequency_loss = 20 * math.log10(input.frequency_mhz)
        return PropagationOutput(
            path_loss_db=distance_loss + frequency_loss + 32.45,
            model_name=self.name,
            is_los=True,
            breakdown={
                "distance_loss": distance_loss,
                "frequency_loss": frequency_loss,
                "constant": 32.45,
            },
        )

View File

@@ -0,0 +1,74 @@
"""
ITU-R P.1546 model for point-to-area predictions.
Valid for:
- Frequency: 30-3000 MHz
- Distance: 1-1000 km
- Time percentages: 1%, 10%, 50%
Best for: VHF/UHF broadcasting and land mobile services.
Reference: ITU-R P.1546-6 (2019)
"""
import math
from app.propagation.base import PropagationModel, PropagationInput, PropagationOutput
class ITUR_P1546Model(PropagationModel):
    """
    Simplified ITU-R P.1546 point-to-area prediction.

    Uses a regression approximation of the field-strength curves; the
    full recommendation additionally models terrain clearance angle,
    mixed land/sea paths and time variability.
    """

    @property
    def name(self) -> str:
        return "ITU-R-P.1546"

    @property
    def frequency_range(self) -> tuple:
        return (30, 3000)

    @property
    def distance_range(self) -> tuple:
        return (1000, 1000000)  # 1-1000 km

    def calculate(self, input: PropagationInput) -> PropagationOutput:
        """Convert curve-derived field strength at 1 kW ERP into path loss."""
        freq = input.frequency_mhz
        dist_km = max(input.distance_m / 1000, 1.0)  # km
        h1 = max(input.tx_height_m, 1.0)

        # Nearest nominal frequency band of the P.1546 curve family.
        if freq < 100:
            f_nom = 100
        elif freq < 600:
            f_nom = 600
        else:
            f_nom = 2000

        # Basic field strength at 1 kW ERP (simplified curve regression).
        E_ref = 106.9 - 20 * math.log10(dist_km)  # dBuV/m at 1kW
        # Transmitter height gain relative to the 10 m reference height.
        delta_h1 = 20 * math.log10(h1 / 10) if h1 > 10 else 0
        # Correction away from the nominal curve frequency.
        delta_f = 20 * math.log10(freq / f_nom)

        # Field strength -> path loss: L = 139.3 - E + 20*log10(f) (50 Ohm).
        E = E_ref + delta_h1 - delta_f
        L = 139.3 - E + 20 * math.log10(freq)

        return PropagationOutput(
            path_loss_db=L,
            model_name=self.name,
            is_los=dist_km < 5,
            breakdown={
                "reference_field": E_ref,
                "height_gain": delta_h1,
                "frequency_correction": delta_f,
                "path_loss": L,
            },
        )

View File

@@ -0,0 +1,87 @@
"""
Knife-edge diffraction model based on ITU-R P.526.
Used for calculating additional loss when terrain or obstacles
block the line of sight between TX and RX.
Reference: ITU-R P.526-15
"""
import math
class KnifeEdgeDiffractionModel:
    """
    Single knife-edge diffraction model (ITU-R P.526).

    Stateless utility — not a full PropagationModel since it calculates
    additional loss, not total path loss.
    """

    @staticmethod
    def calculate_loss(
        d1_m: float,
        d2_m: float,
        h_m: float,
        wavelength_m: float,
    ) -> float:
        """
        Calculate diffraction loss over a single knife edge.

        Uses the continuous ITU-R P.526 approximation
            J(v) = 6.9 + 20*log10(sqrt((v-0.1)^2 + 1) + v - 0.1)
        for v > -0.78, and 0 dB otherwise.

        Fix vs. previous revision: the old piecewise form used
        +1.65*v^2 for 0 <= v < 2.4 (vs. -1.27*v^2 used elsewhere in this
        class), creating a ~17 dB discontinuity at v = 2.4; the P.526
        single-formula approximation is continuous across its domain.

        Args:
            d1_m: Distance from TX to obstacle
            d2_m: Distance from obstacle to RX
            h_m: Obstacle height above LOS line (positive = above)
            wavelength_m: Signal wavelength

        Returns:
            Loss in dB (always >= 0; 0 for degenerate geometry or when
            the edge is well below the LOS line).
        """
        if d1_m <= 0 or d2_m <= 0 or wavelength_m <= 0:
            return 0.0
        # Fresnel-Kirchhoff diffraction parameter
        v = h_m * math.sqrt(2 * (d1_m + d2_m) / (wavelength_m * d1_m * d2_m))
        if v <= -0.78:
            # Edge far enough below the ray: negligible diffraction loss.
            return 0.0
        loss = 6.9 + 20 * math.log10(math.sqrt((v - 0.1) ** 2 + 1) + v - 0.1)
        return max(0.0, loss)

    @staticmethod
    def calculate_clearance_loss(
        clearance_m: float,
        frequency_mhz: float,
    ) -> float:
        """
        Simplified diffraction loss from terrain clearance.

        Matches the existing coverage_service._diffraction_loss logic
        (heuristic v = |clearance| / 10; frequency_mhz is accepted for
        interface compatibility but not used by the heuristic).

        Args:
            clearance_m: Minimum LOS clearance (negative = blocked)
            frequency_mhz: Signal frequency

        Returns:
            Loss in dB (0 if positive clearance; capped at 40 dB)
        """
        if clearance_m >= 0:
            return 0.0
        v = abs(clearance_m) / 10
        if v <= 0:
            loss = 0.0
        elif v < 2.4:
            loss = 6.02 + 9.11 * v - 1.27 * v ** 2
        else:
            loss = 13.0 + 20 * math.log10(v)
        return min(loss, 40.0)

View File

@@ -0,0 +1,75 @@
"""
Longley-Rice Irregular Terrain Model (ITM).
Best for:
- VHF/UHF over irregular terrain
- Point-to-point links
- Distances 1-2000 km
Note: This is a simplified area-mode version.
Full implementation requires terrain profile data.
Reference: NTIA Report 82-100
"""
import math
from app.propagation.base import PropagationModel, PropagationInput, PropagationOutput
class LongleyRiceModel(PropagationModel):
    """Simplified Longley-Rice (ITM) area-mode prediction over irregular terrain."""

    # (upper delta_h bound in meters, clutter loss in dB): flat,
    # gently rolling, rolling hills; anything above is mountains (15 dB).
    _TERRAIN_STEPS = ((10, 0), (50, 5), (150, 10))

    @property
    def name(self) -> str:
        return "Longley-Rice"

    @property
    def frequency_range(self) -> tuple:
        return (20, 20000)  # 20 MHz to 20 GHz

    @property
    def distance_range(self) -> tuple:
        return (1000, 2000000)  # 1-2000 km

    def calculate(self, input: PropagationInput) -> PropagationOutput:
        """
        Simplified Longley-Rice (area mode).
        For proper implementation, use splat! or NTIA ITM reference.
        """
        freq = input.frequency_mhz
        dist_km = max(input.distance_m / 1000, 1.0)
        tx_h = max(input.tx_height_m, 1.0)
        rx_h = max(input.rx_height_m, 1.0)

        # Terrain irregularity parameter (simplified); default: rolling hills.
        delta_h = input.terrain_roughness_m or 90

        # Free space loss
        L_fs = 32.45 + 20 * math.log10(dist_km) + 20 * math.log10(freq)

        # Terrain clutter loss bucketed by irregularity.
        for bound, step_loss in self._TERRAIN_STEPS:
            if delta_h < bound:
                L_terrain = step_loss
                break
        else:
            L_terrain = 15  # Mountains

        # Combined-antenna height gain above the 20 m reference.
        h_eff = tx_h + rx_h
        height_gain = 10 * math.log10(h_eff / 20) if h_eff > 20 else 0

        total = L_fs + L_terrain - height_gain
        return PropagationOutput(
            path_loss_db=total,
            model_name=self.name,
            is_los=delta_h < 10 and dist_km < 10,
            breakdown={
                "free_space_loss": L_fs,
                "terrain_loss": L_terrain,
                "height_gain": height_gain,
            },
        )

View File

@@ -0,0 +1,74 @@
"""
Okumura-Hata empirical propagation model.
Valid for:
- Frequency: 150-1500 MHz
- Distance: 1-20 km
- TX height: 30-200 m
- RX height: 1-10 m
Reference: Hata (1980), "Empirical Formula for Propagation Loss
in Land Mobile Radio Services"
"""
import math
from app.propagation.base import PropagationModel, PropagationInput, PropagationOutput
class OkumuraHataModel(PropagationModel):
    """Okumura-Hata empirical model for macro-cell paths (150-1500 MHz)."""

    @property
    def name(self) -> str:
        return "Okumura-Hata"

    @property
    def frequency_range(self) -> tuple:
        return (150, 1500)

    @property
    def distance_range(self) -> tuple:
        return (100, 20000)  # Extended to 100m minimum for practical use

    def calculate(self, input: PropagationInput) -> PropagationOutput:
        """Path loss via the Hata (1980) formulas plus environment corrections."""
        freq = input.frequency_mhz
        dist_km = max(input.distance_m / 1000, 0.1)  # km, min 100m
        tx_h = max(input.tx_height_m, 1.0)
        rx_h = max(input.rx_height_m, 1.0)

        log_f = math.log10(freq)
        log_hb = math.log10(tx_h)

        # Mobile antenna height correction factor
        if input.environment == "urban" and freq >= 400:
            # Large city variant
            a_hm = 3.2 * (math.log10(11.75 * rx_h) ** 2) - 4.97
        else:
            # Medium/small city variant
            a_hm = (1.1 * log_f - 0.7) * rx_h - (1.56 * log_f - 0.8)

        # Basic path loss (urban)
        L_urban = (
            69.55
            + 26.16 * log_f
            - 13.82 * log_hb
            - a_hm
            + (44.9 - 6.55 * log_hb) * math.log10(dist_km)
        )

        # Environment correction relative to the urban baseline
        if input.environment == "suburban":
            L = L_urban - 2 * (math.log10(freq / 28) ** 2) - 5.4
        elif input.environment == "rural":
            L = L_urban - 4.78 * (log_f ** 2) + 18.33 * log_f - 35.94
        elif input.environment == "open":
            L = L_urban - 4.78 * (log_f ** 2) + 18.33 * log_f - 40.94
        else:
            L = L_urban

        return PropagationOutput(
            path_loss_db=L,
            model_name=self.name,
            is_los=False,
            breakdown={
                "basic_loss": L_urban,
                "environment_correction": L - L_urban,
                "antenna_correction": a_hm,
            },
        )

View File

@@ -1,5 +1,6 @@
import os
import re
import asyncio
import httpx
import json
from typing import List, Optional
@@ -90,7 +91,10 @@ class BuildingsService:
OpenStreetMap buildings via Overpass API with local caching.
"""
OVERPASS_URL = "https://overpass-api.de/api/interpreter"
OVERPASS_URLS = [
"https://overpass-api.de/api/interpreter",
"https://overpass.kumi.systems/api/interpreter",
]
DEFAULT_LEVEL_HEIGHT = 3.0 # meters per floor
DEFAULT_BUILDING_HEIGHT = 9.0 # 3 floors if unknown
@@ -152,7 +156,7 @@ class BuildingsService:
self._memory_cache[bbox_key] = buildings
return buildings
# Fetch from Overpass API
# Fetch from Overpass API with retry
print(f"[Buildings] Fetching from Overpass API...")
query = f"""
@@ -166,17 +170,26 @@ class BuildingsService:
out skel qt;
"""
try:
async with httpx.AsyncClient(timeout=60.0) as client:
response = await client.post(
self.OVERPASS_URL,
data={"data": query}
)
response.raise_for_status()
data = response.json()
except Exception as e:
print(f"[Buildings] Overpass API error: {e}")
return []
data = None
max_retries = 3
for attempt in range(max_retries):
url = self.OVERPASS_URLS[attempt % len(self.OVERPASS_URLS)]
try:
timeout = 60.0 * (attempt + 1) # 60s, 120s, 180s
async with httpx.AsyncClient(timeout=timeout) as client:
response = await client.post(url, data={"data": query})
response.raise_for_status()
data = response.json()
break
except Exception as e:
print(f"[Buildings] Overpass attempt {attempt + 1}/{max_retries} failed ({url}): {e}")
if attempt < max_retries - 1:
wait_time = 2 ** attempt # 1s, 2s
print(f"[Buildings] Retrying in {wait_time}s...")
await asyncio.sleep(wait_time)
else:
print(f"[Buildings] All {max_retries} attempts failed")
return []
buildings = self._parse_overpass_response(data)

View File

@@ -0,0 +1,250 @@
"""
Unified cache management for RFCP services.
Provides a single interface for managing all cached data:
- Terrain tiles (SRTM .hgt files, in-memory NumPy arrays)
- OSM building data (disk JSON + in-memory)
- Spatial index data
Tracks memory usage and enforces limits to prevent
memory explosion during large-area calculations.
"""
import os
import sys
import json
import time
import threading
from pathlib import Path
from typing import Optional, Dict, Any, Callable
from datetime import datetime, timedelta
class CacheEntry:
    """A cached value plus the bookkeeping needed for LRU eviction.

    Records creation and last-access times on the monotonic clock,
    the caller-estimated size in bytes, and a read counter.
    """
    # __slots__ keeps per-entry overhead low — caches hold many of these.
    __slots__ = ('value', 'created_at', 'last_accessed', 'size_bytes', 'hits')

    def __init__(self, value: Any, size_bytes: int = 0):
        now = time.monotonic()
        self.value = value
        self.created_at = now
        self.last_accessed = now
        self.size_bytes = size_bytes
        self.hits = 0

    def touch(self) -> None:
        """Record a read: bump the hit counter and refresh the LRU timestamp."""
        self.hits += 1
        self.last_accessed = time.monotonic()
class MemoryCache:
    """
    In-memory LRU cache with byte-level tracking.

    Thread-safe. Evicts least-recently-used entries when max_entries
    or max_size_bytes would be exceeded by an insert.
    """

    def __init__(self, name: str, max_entries: int = 100, max_size_bytes: int = 500 * 1024 * 1024):
        self.name = name
        self.max_entries = max_entries
        self.max_size_bytes = max_size_bytes
        self._entries: Dict[str, CacheEntry] = {}
        self._lock = threading.Lock()
        self._total_bytes = 0
        self._total_hits = 0
        self._total_misses = 0

    def get(self, key: str) -> Optional[Any]:
        """Return the cached value for key, or None on a miss."""
        with self._lock:
            entry = self._entries.get(key)
            if entry is None:
                self._total_misses += 1
                return None
            entry.touch()
            self._total_hits += 1
            return entry.value

    def put(self, key: str, value: Any, size_bytes: int = 0):
        """Insert or replace key, evicting LRU entries to respect limits.

        size_bytes is the caller-estimated footprint of value; it drives
        the byte-budget eviction (0 means "not counted against the budget").
        """
        with self._lock:
            # Remove an existing entry first so its bytes aren't double-counted.
            if key in self._entries:
                self._total_bytes -= self._entries[key].size_bytes
                del self._entries[key]
            # Evict until both the entry-count and byte limits have room.
            # The `and self._entries` guard prevents an infinite loop when a
            # single entry is larger than max_size_bytes.
            while (
                len(self._entries) >= self.max_entries
                or (self._total_bytes + size_bytes > self.max_size_bytes and self._entries)
            ):
                self._evict_lru()
            entry = CacheEntry(value, size_bytes)
            self._entries[key] = entry
            self._total_bytes += size_bytes

    def remove(self, key: str) -> bool:
        """Remove key if present; return True if something was removed."""
        with self._lock:
            entry = self._entries.pop(key, None)
            if entry:
                self._total_bytes -= entry.size_bytes
                return True
            return False

    def clear(self):
        """Drop all entries (hit/miss counters are intentionally kept)."""
        with self._lock:
            self._entries.clear()
            self._total_bytes = 0

    def _evict_lru(self):
        """Remove least-recently-used entry. Must hold _lock."""
        if not self._entries:
            return
        lru_key = min(self._entries, key=lambda k: self._entries[k].last_accessed)
        entry = self._entries.pop(lru_key)
        self._total_bytes -= entry.size_bytes

    @property
    def size(self) -> int:
        # Single attribute/len read — atomic enough to skip the lock.
        return len(self._entries)

    @property
    def size_bytes(self) -> int:
        return self._total_bytes

    @property
    def size_mb(self) -> float:
        return self._total_bytes / (1024 * 1024)

    def stats(self) -> dict:
        """Return a consistent snapshot of cache statistics.

        Fix: hold _lock while reading, so entry count, byte total and
        hit/miss counters come from one atomic snapshot — previously
        stats() could observe a partially-updated state while another
        thread was inside put()/remove().
        """
        with self._lock:
            total = self._total_hits + self._total_misses
            return {
                "name": self.name,
                "entries": len(self._entries),
                "size_mb": round(self.size_mb, 1),
                "max_size_mb": round(self.max_size_bytes / (1024 * 1024), 1),
                "hits": self._total_hits,
                "misses": self._total_misses,
                "hit_rate": round(self._total_hits / total * 100, 1) if total > 0 else 0,
            }
class DiskCache:
    """
    Persistent disk cache with TTL expiry.

    Each entry is a small JSON file holding the value plus the timestamp
    it was written; entries older than ttl_days behave like misses and
    are deleted on access. Used for OSM building data and other HTTP
    responses.
    """

    def __init__(self, name: str, base_path: Optional[Path] = None, ttl_days: int = 30):
        self.name = name
        self.ttl_days = ttl_days
        root = base_path if base_path is not None else Path(os.environ.get('RFCP_DATA_PATH', './data'))
        self.cache_path = root / 'cache' / name
        self.cache_path.mkdir(parents=True, exist_ok=True)

    def _key_to_file(self, key: str) -> Path:
        # Keys may contain URL-ish characters; map them to filesystem-safe ones.
        safe = key
        for ch in ('/', '\\', ':'):
            safe = safe.replace(ch, '_')
        return self.cache_path / f"{safe}.json"

    def get(self, key: str) -> Optional[Any]:
        """Return the cached value, or None if absent, expired, or unreadable."""
        path = self._key_to_file(key)
        if not path.exists():
            return None
        try:
            payload = json.loads(path.read_text())
            written = datetime.fromisoformat(payload.get('_ts', '2000-01-01'))
            if datetime.now() - written > timedelta(days=self.ttl_days):
                # Stale entry — drop the file and report a miss.
                path.unlink(missing_ok=True)
                return None
            return payload.get('v')
        except Exception:
            # Corrupt or unreadable file behaves like a miss.
            return None

    def put(self, key: str, value: Any):
        """Store a JSON-serializable value under key (best-effort write)."""
        path = self._key_to_file(key)
        record = {'_ts': datetime.now().isoformat(), 'v': value}
        try:
            path.write_text(json.dumps(record))
        except Exception as e:
            print(f"[DiskCache:{self.name}] Write error: {e}")

    def remove(self, key: str) -> bool:
        """Delete the entry for key; return True if it existed."""
        path = self._key_to_file(key)
        if not path.exists():
            return False
        path.unlink()
        return True

    def clear(self):
        """Delete every entry in this cache directory."""
        for entry in self.cache_path.glob("*.json"):
            entry.unlink(missing_ok=True)

    def size_mb(self) -> float:
        """Total on-disk size of all entries, in megabytes."""
        total_bytes = sum(f.stat().st_size for f in self.cache_path.glob("*.json") if f.exists())
        return total_bytes / (1024 * 1024)

    def stats(self) -> dict:
        """Summary stats: entry count, disk footprint, and TTL."""
        entries = list(self.cache_path.glob("*.json"))
        return {
            "name": self.name,
            "entries": len(entries),
            "size_mb": round(self.size_mb(), 1),
            "ttl_days": self.ttl_days,
        }
class CacheManager:
    """
    Unified cache manager for all RFCP services.

    Provides:
    - terrain: MemoryCache for SRTM tile arrays (~25MB each)
    - buildings: MemoryCache for building lists
    - spatial: MemoryCache for spatial index objects
    - osm_disk: DiskCache for OSM API responses
    """

    def __init__(self):
        # Byte budgets are tuned per cache to the typical object sizes.
        mb = 1024 * 1024
        self.terrain = MemoryCache("terrain", max_entries=20, max_size_bytes=500 * mb)
        self.buildings = MemoryCache("buildings", max_entries=50, max_size_bytes=200 * mb)
        self.spatial = MemoryCache("spatial_index", max_entries=50, max_size_bytes=100 * mb)
        self.osm_disk = DiskCache("osm", ttl_days=30)

    def clear_all(self):
        """Clear all caches (memory and disk)."""
        for cache in (self.terrain, self.buildings, self.spatial, self.osm_disk):
            cache.clear()

    def stats(self) -> dict:
        """Get per-cache stats plus the combined in-memory footprint."""
        in_memory_mb = self.terrain.size_mb + self.buildings.size_mb + self.spatial.size_mb
        return {
            "terrain": self.terrain.stats(),
            "buildings": self.buildings.stats(),
            "spatial": self.spatial.stats(),
            "osm_disk": self.osm_disk.stats(),
            "total_memory_mb": round(in_memory_mb, 1),
        }


# Singleton
cache_manager = CacheManager()

View File

@@ -6,7 +6,7 @@ import threading
import numpy as np
import asyncio
from concurrent.futures import ThreadPoolExecutor
from typing import List, Optional, Tuple
from typing import List, Optional, Tuple, Callable
_coverage_log_file = None
@@ -58,6 +58,141 @@ from app.services.parallel_coverage_service import (
CancellationToken,
)
# ── New propagation models (Phase 3.0) ──
from app.propagation.base import PropagationModel, PropagationInput, PropagationOutput
from app.propagation.free_space import FreeSpaceModel
from app.propagation.okumura_hata import OkumuraHataModel
from app.propagation.cost231_hata import Cost231HataModel
from app.propagation.cost231_wi import Cost231WIModel
from app.propagation.itu_r_p1546 import ITUR_P1546Model
from app.propagation.longley_rice import LongleyRiceModel
from app.propagation.itu_r_p526 import KnifeEdgeDiffractionModel
# Pre-instantiate models (stateless, thread-safe)
_PROPAGATION_MODELS = {
'free_space': FreeSpaceModel(),
'okumura_hata': OkumuraHataModel(),
'cost231_hata': Cost231HataModel(),
'cost231_wi': Cost231WIModel(),
'itu_r_p1546': ITUR_P1546Model(),
'longley_rice': LongleyRiceModel(),
}
_DIFFRACTION_MODEL = KnifeEdgeDiffractionModel()
def select_propagation_model(frequency_mhz: float, environment: str = "urban") -> PropagationModel:
    """Select the best propagation model for a given frequency and environment.

    Model selection logic:
    - < 150 MHz: Longley-Rice (ITM, designed for VHF)
    - 150-520 MHz: ITU-R P.1546 (urban) / Longley-Rice (rural)
    - 520-1500 MHz: Okumura-Hata
    - 1500-2000 MHz: COST-231 Hata
    - > 2000 MHz: Free-Space Path Loss
    """
    if frequency_mhz < 150:
        key = 'longley_rice'
    elif frequency_mhz <= 520:
        # In open/rural terrain the ITM model is preferred over P.1546 here.
        key = 'longley_rice' if environment in ('rural', 'open') else 'itu_r_p1546'
    elif frequency_mhz <= 1500:
        key = 'okumura_hata'
    elif frequency_mhz <= 2000:
        key = 'cost231_hata'
    else:
        key = 'free_space'
    return _PROPAGATION_MODELS[key]
# ── OSM data filtering ──
# OSM fetches use 1-degree grid cells — much larger than the coverage radius.
# Passing all buildings to ProcessPool workers causes MemoryError (pickle copy
# per worker). Filter to coverage bbox and cap count for safety.
MAX_BUILDINGS_FOR_WORKERS = 15000


def _filter_buildings_to_bbox(
    buildings: list,
    min_lat: float, min_lon: float,
    max_lat: float, max_lon: float,
    site_lat: float, site_lon: float,
    log_fn=None,
) -> list:
    """Filter buildings to the coverage bbox, capped at MAX_BUILDINGS_FOR_WORKERS.

    When the cap applies, the nearest-to-site buildings are kept, since
    they are the ones most likely to affect coverage.
    """
    # Small inputs pass straight through (no copy, no filtering).
    if not buildings or len(buildings) <= MAX_BUILDINGS_FOR_WORKERS:
        return buildings

    original = len(buildings)

    # Buffered bbox (~500m ≈ 0.005°) so LOS checks near the edge still see
    # buildings just outside the coverage area.
    buf = 0.005
    lat_lo, lat_hi = min_lat - buf, max_lat + buf
    lon_lo, lon_hi = min_lon - buf, max_lon + buf

    def _touches_bbox(b) -> bool:
        # Keep a building if any footprint vertex falls inside the buffered box.
        return any(
            lat_lo <= lat_pt <= lat_hi and lon_lo <= lon_pt <= lon_hi
            for lon_pt, lat_pt in b.geometry
        )

    filtered = [b for b in buildings if _touches_bbox(b)]

    if log_fn:
        log_fn(f"Building bbox filter: {original} -> {len(filtered)}")

    # Still over budget: rank by squared centroid distance (fine for ordering)
    # and keep the MAX_BUILDINGS_FOR_WORKERS closest to the site.
    if len(filtered) > MAX_BUILDINGS_FOR_WORKERS:
        def _centroid_dist_sq(b):
            lats = [p[1] for p in b.geometry]
            lons = [p[0] for p in b.geometry]
            clat = sum(lats) / len(lats)
            clon = sum(lons) / len(lons)
            return (clat - site_lat) ** 2 + (clon - site_lon) ** 2

        filtered = sorted(filtered, key=_centroid_dist_sq)[:MAX_BUILDINGS_FOR_WORKERS]
        if log_fn:
            log_fn(f"Building distance cap: -> {len(filtered)} (nearest to site)")

    return filtered
def _filter_osm_list_to_bbox(items: list, min_lat: float, min_lon: float,
                             max_lat: float, max_lon: float,
                             max_count: int = 20000) -> list:
    """Filter OSM items (streets/water/vegetation) to the coverage bbox.

    Items must have a .geometry attribute (list of [lon, lat] pairs) or
    point objects with lat/lon attributes. Returns at most max_count items;
    items with no recognizable geometry are kept.
    """
    # Small inputs pass through untouched (no copy, no filtering).
    if not items or len(items) <= max_count:
        return items

    # Buffered bbox (~500m) so features just outside the edge survive.
    buf = 0.005
    lat_lo, lat_hi = min_lat - buf, max_lat + buf
    lon_lo, lon_hi = min_lon - buf, max_lon + buf

    def _intersects(item) -> bool:
        geom = getattr(item, 'geometry', None) or getattr(item, 'points', None)
        if not geom:
            # No geometry — keep it; we can't prove it lies outside.
            return True
        for pt in geom:
            if isinstance(pt, (list, tuple)) and len(pt) >= 2:
                lon_pt, lat_pt = pt[0], pt[1]
            elif hasattr(pt, 'lat'):
                lat_pt, lon_pt = pt.lat, pt.lon
            else:
                continue
            if lat_lo <= lat_pt <= lat_hi and lon_lo <= lon_pt <= lon_hi:
                return True
        return False

    kept = [item for item in items if _intersects(item)]
    return kept[:max_count]
class CoveragePoint(BaseModel):
lat: float
@@ -79,6 +214,9 @@ class CoverageSettings(BaseModel):
resolution: float = 200 # meters
min_signal: float = -120 # dBm threshold
# Environment type for propagation model selection
environment: str = "urban" # urban, suburban, rural, open
# Layer toggles
use_terrain: bool = True
use_buildings: bool = True
@@ -283,11 +421,13 @@ class CoverageService:
site: SiteParams,
settings: CoverageSettings,
cancel_token: Optional[CancellationToken] = None,
progress_fn: Optional[Callable[[str, float], None]] = None,
) -> List[CoveragePoint]:
"""
Calculate coverage grid for a single site
Returns list of CoveragePoint with RSRP values
Returns list of CoveragePoint with RSRP values.
progress_fn(phase, pct): optional callback for progress updates (0.0-1.0).
"""
calc_start = time.time()
@@ -317,6 +457,9 @@ class CoverageService:
# ━━━ PHASE 1: Fetch OSM data ━━━
_clog("━━━ PHASE 1: Fetching OSM data ━━━")
if progress_fn:
progress_fn("Fetching map data", 0.10)
await asyncio.sleep(0) # Yield so progress_sender can flush WS message
t_osm = time.time()
osm_data = await self._fetch_osm_grid_aligned(
min_lat, min_lon, max_lat, max_lon, settings
@@ -329,6 +472,17 @@ class CoverageService:
vegetation_areas = osm_data["vegetation_areas"]
_clog(f"━━━ PHASE 1 done: {osm_time:.1f}s ━━━")
# ── Filter OSM data to coverage area ──
# OSM cells are 1-degree wide, often far larger than the coverage radius.
# Passing 350k buildings to ProcessPool workers causes MemoryError (pickle).
buildings = _filter_buildings_to_bbox(
buildings, min_lat, min_lon, max_lat, max_lon,
site.lat, site.lon, _clog,
)
streets = _filter_osm_list_to_bbox(streets, min_lat, min_lon, max_lat, max_lon)
water_bodies = _filter_osm_list_to_bbox(water_bodies, min_lat, min_lon, max_lat, max_lon)
vegetation_areas = _filter_osm_list_to_bbox(vegetation_areas, min_lat, min_lon, max_lat, max_lon)
# Build spatial index for buildings
spatial_idx: Optional[SpatialIndex] = None
if buildings:
@@ -337,6 +491,9 @@ class CoverageService:
# ━━━ PHASE 2: Pre-load terrain ━━━
_clog("━━━ PHASE 2: Pre-loading terrain ━━━")
if progress_fn:
progress_fn("Loading terrain", 0.25)
await asyncio.sleep(0)
t_terrain = time.time()
tile_names = await self.terrain.ensure_tiles_for_bbox(
min_lat, min_lon, max_lat, max_lon
@@ -355,6 +512,9 @@ class CoverageService:
_clog(f"━━━ PHASE 2 done: {terrain_time:.1f}s ━━━")
# ━━━ PHASE 2.5: Vectorized pre-computation (GPU/NumPy) ━━━
if progress_fn:
progress_fn("Pre-computing propagation", 0.35)
await asyncio.sleep(0)
from app.services.gpu_service import gpu_service
t_gpu = time.time()
@@ -365,7 +525,8 @@ class CoverageService:
grid_lats, grid_lons, site.lat, site.lon
)
pre_path_loss = gpu_service.precompute_path_loss(
pre_distances, site.frequency, site.height
pre_distances, site.frequency, site.height,
environment=getattr(settings, 'environment', 'urban'),
)
# Build lookup dict for point loop
@@ -377,8 +538,11 @@ class CoverageService:
}
gpu_time = time.time() - t_gpu
env = getattr(settings, 'environment', 'urban')
selected_model = select_propagation_model(site.frequency, env)
_clog(f"━━━ PHASE 2.5: Vectorized pre-computation done: {gpu_time:.3f}s "
f"({len(grid)} points, backend={'GPU' if gpu_service.available else 'CPU/NumPy'}) ━━━")
f"({len(grid)} points, model={selected_model.name}, freq={site.frequency}MHz, "
f"env={env}, backend={'GPU' if gpu_service.available else 'CPU/NumPy'}) ━━━")
# ━━━ PHASE 3: Point calculation ━━━
dominant_path_service._log_count = 0 # Reset diagnostic counter
@@ -387,6 +551,10 @@ class CoverageService:
use_parallel = len(grid) > 100 and get_cpu_count() > 1
num_workers = get_cpu_count()
if progress_fn:
progress_fn("Calculating coverage", 0.40)
await asyncio.sleep(0)
if use_parallel:
backend = get_parallel_backend()
_clog(f"━━━ PHASE 3: Calculating {len(grid)} points "
@@ -404,6 +572,7 @@ class CoverageService:
site_elevation, num_workers, _clog,
cancel_token=cancel_token,
precomputed=precomputed,
progress_fn=progress_fn,
),
)
@@ -426,9 +595,14 @@ class CoverageService:
site_elevation, point_elevations,
cancel_token=cancel_token,
precomputed=precomputed,
progress_fn=progress_fn,
),
)
if progress_fn:
progress_fn("Finalizing", 0.95)
await asyncio.sleep(0)
points_time = time.time() - t_points
total_time = time.time() - calc_start
@@ -522,6 +696,7 @@ class CoverageService:
spatial_idx, water_bodies, vegetation_areas,
site_elevation, point_elevations,
cancel_token=None, precomputed=None,
progress_fn=None,
):
"""Sync point loop - runs in ThreadPoolExecutor, bypasses event loop."""
points = []
@@ -538,6 +713,8 @@ class CoverageService:
if i % log_interval == 0:
_clog(f"Progress: {i}/{total} ({i*100//total}%)")
if progress_fn:
progress_fn("Calculating coverage", 0.40 + 0.55 * (i / total))
pre = precomputed.get((lat, lon)) if precomputed else None
@@ -581,11 +758,20 @@ class CoverageService:
if distance < 1:
distance = 1
# Base path loss (use precomputed if available)
# Base path loss (use precomputed if available, else use new model)
if precomputed_path_loss is not None:
path_loss = precomputed_path_loss
else:
path_loss = self._okumura_hata(distance, site.frequency, site.height, 1.5)
env = getattr(settings, 'environment', 'urban')
model = select_propagation_model(site.frequency, env)
prop_input = PropagationInput(
frequency_mhz=site.frequency,
distance_m=distance,
tx_height_m=site.height,
rx_height_m=1.5,
environment=env,
)
path_loss = model.calculate(prop_input).path_loss_db
# Antenna pattern
antenna_loss = 0.0
@@ -649,90 +835,105 @@ class CoverageService:
timing["buildings"] += time.time() - t0
# Dominant path (vectorized NumPy) — replaces loop-based sync version
if settings.use_dominant_path and (spatial_idx or nearby_buildings):
# Only enter when there are actual buildings (spatial_idx with data OR non-empty list)
has_building_data = nearby_buildings or (spatial_idx is not None and spatial_idx._grid)
if settings.use_dominant_path and has_building_data:
t0 = time.time()
dominant = find_dominant_paths_vectorized(
site.lat, site.lon, site.height,
lat, lon, 1.5,
site.frequency, nearby_buildings,
spatial_idx=spatial_idx,
)
if dominant['path_type'] == 'direct':
# Direct LOS confirmed by vectorized check
has_los = True
building_loss = 0.0
elif dominant['path_type'] == 'reflection':
# Reflection path bypasses buildings — reduce building loss
building_loss = max(0.0, building_loss - (10.0 - dominant['total_loss']))
has_los = False
elif dominant['path_type'] == 'diffraction':
# Diffraction: use estimated loss if worse than current
if dominant['total_loss'] > building_loss:
building_loss = dominant['total_loss']
has_los = False
try:
dominant = find_dominant_paths_vectorized(
site.lat, site.lon, site.height,
lat, lon, 1.5,
site.frequency, nearby_buildings,
spatial_idx=spatial_idx,
)
if dominant['path_type'] == 'direct':
has_los = True
building_loss = 0.0
elif dominant['path_type'] == 'reflection':
building_loss = max(0.0, building_loss - (10.0 - dominant['total_loss']))
has_los = False
elif dominant['path_type'] == 'diffraction':
if dominant['total_loss'] > building_loss:
building_loss = dominant['total_loss']
has_los = False
except Exception:
pass # Skip dominant path on error — use base model
timing["dominant_path"] += time.time() - t0
# Street canyon (sync)
if settings.use_street_canyon and streets:
t0 = time.time()
canyon_loss, _street_path = street_canyon_service.calculate_street_canyon_loss_sync(
site.lat, site.lon, site.height,
lat, lon, 1.5,
site.frequency, streets
)
if canyon_loss < (path_loss + terrain_loss + building_loss):
path_loss = canyon_loss
terrain_loss = 0
building_loss = 0
try:
canyon_loss, _street_path = street_canyon_service.calculate_street_canyon_loss_sync(
site.lat, site.lon, site.height,
lat, lon, 1.5,
site.frequency, streets
)
# Only use street canyon if it's a finite improvement
if math.isfinite(canyon_loss) and canyon_loss < (path_loss + terrain_loss + building_loss):
path_loss = canyon_loss
terrain_loss = 0
building_loss = 0
except Exception:
pass # Skip street canyon on error
timing["street_canyon"] += time.time() - t0
# Vegetation (already sync)
veg_loss = 0.0
if settings.use_vegetation and vegetation_areas:
t0 = time.time()
veg_loss = vegetation_service.calculate_vegetation_loss(
site.lat, site.lon, lat, lon, vegetation_areas, settings.season
)
try:
veg_loss = vegetation_service.calculate_vegetation_loss(
site.lat, site.lon, lat, lon, vegetation_areas, settings.season
)
except Exception:
veg_loss = 0.0
timing["vegetation"] += time.time() - t0
# Reflections (sync)
reflection_gain = 0.0
if settings.use_reflections and nearby_buildings:
t0 = time.time()
is_over_water = False
if settings.use_water_reflection and water_bodies:
is_over_water = water_service.point_over_water(lat, lon, water_bodies) is not None
try:
is_over_water = False
if settings.use_water_reflection and water_bodies:
is_over_water = water_service.point_over_water(lat, lon, water_bodies) is not None
refl_paths = reflection_service.find_reflection_paths_sync(
site.lat, site.lon, site.height,
lat, lon, 1.5,
site.frequency, nearby_buildings,
include_ground=True
)
if is_over_water and refl_paths:
water_path = reflection_service._calculate_ground_reflection(
refl_paths = reflection_service.find_reflection_paths_sync(
site.lat, site.lon, site.height,
lat, lon, 1.5,
site.frequency, is_water=True
site.frequency, nearby_buildings,
include_ground=True
)
if water_path:
refl_paths = [p for p in refl_paths if "ground" not in p.materials]
refl_paths.append(water_path)
refl_paths.sort(key=lambda p: p.total_loss)
if refl_paths:
direct_rsrp = (site.power + site.gain - path_loss - antenna_loss
- terrain_loss - building_loss - veg_loss)
combined_rsrp = reflection_service.combine_paths(
direct_rsrp, refl_paths, site.power + site.gain
)
reflection_gain = max(0, combined_rsrp - direct_rsrp)
if is_over_water and refl_paths:
water_path = reflection_service._calculate_ground_reflection(
site.lat, site.lon, site.height,
lat, lon, 1.5,
site.frequency, is_water=True
)
if water_path:
refl_paths = [p for p in refl_paths if "ground" not in p.materials]
refl_paths.append(water_path)
refl_paths.sort(key=lambda p: p.total_loss)
if refl_paths:
direct_rsrp = (site.power + site.gain - path_loss - antenna_loss
- terrain_loss - building_loss - veg_loss)
combined_rsrp = reflection_service.combine_paths(
direct_rsrp, refl_paths, site.power + site.gain
)
reflection_gain = max(0, combined_rsrp - direct_rsrp)
except Exception:
reflection_gain = 0.0
timing["reflection"] += time.time() - t0
elif settings.use_water_reflection and water_bodies and not settings.use_reflections:
is_over_water = water_service.point_over_water(lat, lon, water_bodies) is not None
if is_over_water:
reflection_gain = 3.0
try:
is_over_water = water_service.point_over_water(lat, lon, water_bodies) is not None
if is_over_water:
reflection_gain = 3.0
except Exception:
pass
# Rain
rain_loss = 0.0
@@ -770,26 +971,6 @@ class CoverageService:
indoor_loss=indoor_loss, atmospheric_loss=atmo_loss,
)
def _okumura_hata(
self,
distance: float,
frequency: float,
tx_height: float,
rx_height: float
) -> float:
"""Okumura-Hata path loss model (urban). Returns path loss in dB."""
d_km = distance / 1000
if d_km < 0.1:
d_km = 0.1
a_hm = (1.1 * np.log10(frequency) - 0.7) * rx_height - (1.56 * np.log10(frequency) - 0.8)
L = (69.55 + 26.16 * np.log10(frequency) - 13.82 * np.log10(tx_height) - a_hm +
(44.9 - 6.55 * np.log10(tx_height)) * np.log10(d_km))
return L
def _antenna_pattern_loss(
self,
site_lat: float, site_lon: float,
@@ -831,20 +1012,8 @@ class CoverageService:
return (bearing + 360) % 360
def _diffraction_loss(self, clearance: float, frequency: float) -> float:
"""Knife-edge diffraction loss. Returns additional loss in dB."""
if clearance >= 0:
return 0.0
v = abs(clearance) / 10
if v <= 0:
loss = 0
elif v < 2.4:
loss = 6.02 + 9.11 * v - 1.27 * v**2
else:
loss = 13.0 + 20 * np.log10(v)
return min(loss, 40)
"""Knife-edge diffraction loss using ITU-R P.526 model."""
return _DIFFRACTION_MODEL.calculate_clearance_loss(clearance, frequency)
# Singleton

View File

@@ -139,12 +139,33 @@ def find_dominant_paths_vectorized(
"""
global _vec_log_count
# Fast path: no buildings at all → direct LOS, skip all numpy work
has_spatial_data = spatial_idx is not None and spatial_idx._grid
if not buildings and not has_spatial_data:
return {
'has_los': True,
'path_type': 'direct',
'total_loss': 0.0,
'path_length': 0.0,
'reflection_point': None,
}
# Get nearby buildings via spatial index (same filtering as sync version)
if spatial_idx:
line_buildings = spatial_idx.query_line(tx_lat, tx_lon, rx_lat, rx_lon)
else:
line_buildings = buildings
# No nearby buildings along this line → direct LOS
if not line_buildings:
return {
'has_los': True,
'path_type': 'direct',
'total_loss': 0.0,
'path_length': 0.0,
'reflection_point': None,
}
line_buildings = _filter_buildings_by_distance(
line_buildings,
(tx_lat, tx_lon), (rx_lat, rx_lon),
@@ -654,6 +675,19 @@ class DominantPathService:
buildings: fallback list (only used if spatial_idx is None)
spatial_idx: grid-based spatial index for fast local queries
"""
# Fast path: no buildings at all → direct LOS only
has_spatial_data = spatial_idx is not None and spatial_idx._grid
if not buildings and not has_spatial_data:
distance = terrain_service.haversine_distance(tx_lat, tx_lon, rx_lat, rx_lon)
return [RayPath(
path_type="direct",
total_distance=distance,
path_loss=self._calculate_path_loss(distance, frequency_mhz, tx_height, rx_height),
reflection_points=[],
materials_crossed=[],
is_valid=True,
)]
paths = []
# Use spatial index to get only buildings along the TX→RX line

View File

@@ -99,8 +99,12 @@ class GPUService:
frequency_mhz: float,
tx_height: float,
rx_height: float = 1.5,
environment: str = "urban",
) -> np.ndarray:
"""Vectorized Okumura-Hata path loss for all distances.
"""Vectorized path loss using the appropriate propagation model.
Selects model based on frequency (Phase 3.0 model selection), then
applies the correct formula in a single vectorized numpy pass.
Returns path loss in dB as a CPU numpy array.
"""
@@ -108,16 +112,47 @@ class GPUService:
d_km = xp.maximum(d_arr / 1000.0, 0.1)
freq = float(frequency_mhz)
h_tx = float(tx_height)
h_rx = float(rx_height)
h_tx = max(float(tx_height), 1.0)
h_rx = max(float(rx_height), 1.0)
log_f = xp.log10(xp.float64(freq))
log_hb = xp.log10(xp.float64(h_tx))
log_hb = xp.log10(xp.float64(max(h_tx, 1.0)))
a_hm = (1.1 * log_f - 0.7) * h_rx - (1.56 * log_f - 0.8)
if freq > 2000:
# Free-Space Path Loss: FSPL = 20*log10(d_km) + 20*log10(f) + 32.45
L = 20.0 * xp.log10(d_km) + 20.0 * log_f + 32.45
L = (69.55 + 26.16 * log_f - 13.82 * log_hb - a_hm
+ (44.9 - 6.55 * log_hb) * xp.log10(d_km))
elif freq > 1500:
# COST-231 Hata: extends Okumura-Hata to 1500-2000 MHz
a_hm = (1.1 * log_f - 0.7) * h_rx - (1.56 * log_f - 0.8)
L = (46.3 + 33.9 * log_f - 13.82 * log_hb - a_hm
+ (44.9 - 6.55 * log_hb) * xp.log10(d_km))
if environment == "urban":
L += 3.0 # Metropolitan center correction
elif freq >= 150:
# Okumura-Hata: 150-1500 MHz
if environment == "urban" and freq >= 400:
a_hm = 3.2 * (xp.log10(11.75 * h_rx) ** 2) - 4.97
else:
a_hm = (1.1 * log_f - 0.7) * h_rx - (1.56 * log_f - 0.8)
L_urban = (69.55 + 26.16 * log_f - 13.82 * log_hb - a_hm
+ (44.9 - 6.55 * log_hb) * xp.log10(d_km))
if environment == "suburban":
L = L_urban - 2 * (xp.log10(freq / 28) ** 2) - 5.4
elif environment == "rural":
L = L_urban - 4.78 * (log_f ** 2) + 18.33 * log_f - 35.94
elif environment == "open":
L = L_urban - 4.78 * (log_f ** 2) + 18.33 * log_f - 40.94
else:
L = L_urban
else:
# Very low frequency — Longley-Rice simplified (area mode)
# Use FSPL as baseline with terrain roughness correction
L = 20.0 * xp.log10(d_km) + 20.0 * log_f + 32.45 + 10.0
return _to_cpu(L)

View File

@@ -0,0 +1,167 @@
"""
Dedicated OpenStreetMap Overpass API client.
Handles:
- Building footprint queries
- Vegetation area queries
- Water body queries
- Response parsing and error handling
- Rate limiting (Overpass requires courtesy)
"""
import time
import asyncio
from typing import List, Optional, Dict, Any
import httpx
# Overpass API endpoints (primary + mirror)
OVERPASS_ENDPOINTS = [
    "https://overpass-api.de/api/interpreter",
    "https://overpass.kumi.systems/api/interpreter",
]

# Minimum seconds between requests to same endpoint
RATE_LIMIT_SECONDS = 1.0


class OSMClient:
    """
    OpenStreetMap Overpass API client with rate limiting
    and automatic failover between endpoints.
    """

    def __init__(self, timeout: float = 60.0):
        # Per-attempt HTTP timeout in seconds.
        self.timeout = timeout
        # monotonic() timestamp of the previous request, for rate limiting.
        self._last_request_time: float = 0
        # Index of the last endpoint that answered; queries start from it
        # so a healthy mirror stays "sticky".
        self._current_endpoint = 0

    async def _rate_limit(self):
        """Sleep just long enough to honor RATE_LIMIT_SECONDS between requests."""
        since_last = time.monotonic() - self._last_request_time
        remaining = RATE_LIMIT_SECONDS - since_last
        if remaining > 0:
            await asyncio.sleep(remaining)
        self._last_request_time = time.monotonic()

    async def query(self, overpass_ql: str) -> Optional[Dict[str, Any]]:
        """
        Execute an Overpass QL query with automatic failover.
        Returns parsed JSON response or None on failure.
        """
        await self._rate_limit()
        endpoint_count = len(OVERPASS_ENDPOINTS)
        for offset in range(endpoint_count):
            idx = (self._current_endpoint + offset) % endpoint_count
            endpoint = OVERPASS_ENDPOINTS[idx]
            try:
                async with httpx.AsyncClient(timeout=self.timeout) as client:
                    response = await client.post(
                        endpoint,
                        data={"data": overpass_ql},
                    )
                if response.status_code == 429:
                    # Rate limited — try next endpoint
                    print(f"[OSM] Rate limited by {endpoint}, trying next...")
                    continue
                response.raise_for_status()
                # Remember the working endpoint for future queries.
                self._current_endpoint = idx
                return response.json()
            except httpx.TimeoutException:
                print(f"[OSM] Timeout from {endpoint}")
            except httpx.HTTPStatusError as e:
                print(f"[OSM] HTTP error from {endpoint}: {e.response.status_code}")
            except Exception as e:
                print(f"[OSM] Error from {endpoint}: {e}")
        print("[OSM] All endpoints failed")
        return None

    async def fetch_buildings(
        self,
        min_lat: float, min_lon: float,
        max_lat: float, max_lon: float,
    ) -> List[Dict[str, Any]]:
        """
        Fetch building footprints in a bounding box.
        Returns list of raw OSM elements (ways and relations).
        """
        query = f"""
        [out:json][timeout:30];
        (
          way["building"]({min_lat},{min_lon},{max_lat},{max_lon});
          relation["building"]({min_lat},{min_lon},{max_lat},{max_lon});
        );
        out body;
        >;
        out skel qt;
        """
        data = await self.query(query)
        return data.get("elements", []) if data is not None else []

    async def fetch_vegetation(
        self,
        min_lat: float, min_lon: float,
        max_lat: float, max_lon: float,
    ) -> List[Dict[str, Any]]:
        """Fetch vegetation areas (forests, parks, etc.)."""
        query = f"""
        [out:json][timeout:30];
        (
          way["natural"="wood"]({min_lat},{min_lon},{max_lat},{max_lon});
          way["landuse"="forest"]({min_lat},{min_lon},{max_lat},{max_lon});
          way["natural"="tree_row"]({min_lat},{min_lon},{max_lat},{max_lon});
          relation["natural"="wood"]({min_lat},{min_lon},{max_lat},{max_lon});
          relation["landuse"="forest"]({min_lat},{min_lon},{max_lat},{max_lon});
        );
        out body;
        >;
        out skel qt;
        """
        data = await self.query(query)
        return data.get("elements", []) if data is not None else []

    async def fetch_water(
        self,
        min_lat: float, min_lon: float,
        max_lat: float, max_lon: float,
    ) -> List[Dict[str, Any]]:
        """Fetch water bodies (rivers, lakes, etc.)."""
        query = f"""
        [out:json][timeout:30];
        (
          way["natural"="water"]({min_lat},{min_lon},{max_lat},{max_lon});
          way["waterway"]({min_lat},{min_lon},{max_lat},{max_lon});
          relation["natural"="water"]({min_lat},{min_lon},{max_lat},{max_lon});
        );
        out body;
        >;
        out skel qt;
        """
        data = await self.query(query)
        return data.get("elements", []) if data is not None else []


# Singleton
osm_client = OSMClient()

View File

@@ -47,6 +47,24 @@ class CancellationToken:
return self._event.is_set()
# ── Active pool tracking (for graceful shutdown) ──
# Module-level handle to the ProcessPoolExecutor currently running a
# calculation, so shutdown/cancel code paths can reach it from another thread.
_active_pool = None  # Global ref to current ProcessPoolExecutor
_active_pool_lock = threading.Lock()
def _set_active_pool(pool):
    """Publish `pool` as the active ProcessPoolExecutor (thread-safe)."""
    global _active_pool
    with _active_pool_lock:
        _active_pool = pool
def _clear_active_pool():
    """Forget the active ProcessPoolExecutor reference (thread-safe)."""
    global _active_pool
    with _active_pool_lock:
        _active_pool = None
# ── Worker process cleanup ──
def _clog(msg: str):
@@ -57,10 +75,23 @@ def _clog(msg: str):
def _kill_worker_processes() -> int:
"""Kill ALL rfcp-server processes except the current (main) process.
Uses process NAME matching instead of PID tree because psutil.children()
cannot see grandchildren spawned by ProcessPoolExecutor workers.
First shuts down the active ProcessPoolExecutor (if any), then uses
process NAME matching to kill remaining workers.
Returns the number of processes killed.
"""
global _active_pool
# Step 0: Shut down active ProcessPoolExecutor gracefully
with _active_pool_lock:
pool = _active_pool
_active_pool = None
if pool is not None:
try:
pool.shutdown(wait=False, cancel_futures=True)
_clog("Active ProcessPoolExecutor shutdown requested")
except Exception as e:
_clog(f"Pool shutdown error: {e}")
my_pid = os.getpid()
killed_count = 0
@@ -154,10 +185,12 @@ def _ray_process_chunk_impl(chunk, terrain_cache, buildings, osm_data, config):
# Build or reuse spatial index (expensive — ~1s for 350K buildings).
cache_key = config.get('cache_key', '')
if _worker_cache_key != cache_key:
from app.services.spatial_index import SpatialIndex
_worker_spatial_idx = SpatialIndex()
if buildings:
from app.services.spatial_index import SpatialIndex
_worker_spatial_idx = SpatialIndex()
_worker_spatial_idx.build(buildings)
else:
_worker_spatial_idx = None
_worker_cache_key = cache_key
# Process points
@@ -262,6 +295,7 @@ def calculate_coverage_parallel(
log_fn: Optional[Callable[[str], None]] = None,
cancel_token: Optional[CancellationToken] = None,
precomputed: Optional[Dict] = None,
progress_fn: Optional[Callable[[str, float], None]] = None,
) -> Tuple[List[Dict], Dict[str, float]]:
"""Calculate coverage points in parallel.
@@ -287,6 +321,7 @@ def calculate_coverage_parallel(
terrain_cache, buildings, streets, water_bodies,
vegetation_areas, site_elevation,
num_workers, log_fn, cancel_token, precomputed,
progress_fn,
)
except Exception as e:
log_fn(f"Ray execution failed: {e} — falling back to sequential")
@@ -300,6 +335,7 @@ def calculate_coverage_parallel(
terrain_cache, buildings, streets, water_bodies,
vegetation_areas, site_elevation,
pool_workers, log_fn, cancel_token, precomputed,
progress_fn,
)
except Exception as e:
log_fn(f"ProcessPool failed: {e} — falling back to sequential")
@@ -310,6 +346,7 @@ def calculate_coverage_parallel(
grid, point_elevations, site_dict, settings_dict,
buildings, streets, water_bodies, vegetation_areas,
site_elevation, log_fn, cancel_token, precomputed,
progress_fn,
)
@@ -321,6 +358,7 @@ def _calculate_with_ray(
terrain_cache, buildings, streets, water_bodies,
vegetation_areas, site_elevation,
num_workers, log_fn, cancel_token=None, precomputed=None,
progress_fn=None,
):
"""Execute using Ray shared-memory object store."""
total_points = len(grid)
@@ -404,6 +442,9 @@ def _calculate_with_ray(
eta = (total_points - pts) / rate if rate > 0 else 0
log_fn(f"Progress: {completed_chunks}/{total_chunks} chunks ({pct}%) — "
f"{pts} pts, {rate:.0f} pts/s, ETA {eta:.0f}s")
if progress_fn:
# Map chunk progress to 40%-95% range
progress_fn("Calculating coverage", 0.40 + 0.55 * (completed_chunks / total_chunks))
calc_time = time.time() - t_calc
log_fn(f"Ray done: {calc_time:.1f}s, {len(all_results)} results "
@@ -428,9 +469,10 @@ def _pool_worker_process_chunk(args):
from app.services.terrain_service import terrain_service
terrain_service._tile_cache = terrain_cache
from app.services.spatial_index import SpatialIndex
spatial_idx = SpatialIndex()
spatial_idx = None
if buildings:
from app.services.spatial_index import SpatialIndex
spatial_idx = SpatialIndex()
spatial_idx.build(buildings)
from app.services.coverage_service import CoverageService, SiteParams, CoverageSettings
@@ -465,32 +507,162 @@ def _pool_worker_process_chunk(args):
return results
def _store_terrain_in_shm(terrain_cache: Dict[str, np.ndarray], log_fn) -> Tuple[list, Dict[str, dict]]:
    """Copy each terrain tile array into its own shared-memory block.

    Returns ``(shm_blocks, tile_refs)``: *shm_blocks* are the SharedMemory
    handles the caller must eventually close/unlink, and *tile_refs* maps
    tile_name -> {shm_name, shape, dtype} so worker processes can rebuild
    zero-copy numpy views over the same memory instead of unpickling copies.
    A tile that fails to store is logged and simply left out of *tile_refs*.
    """
    import multiprocessing.shared_memory as shm_mod

    shm_blocks: list = []
    tile_refs: Dict[str, dict] = {}
    for tile_name, tile in terrain_cache.items():
        try:
            segment = shm_mod.SharedMemory(create=True, size=tile.nbytes)
            # Track the handle immediately so the caller can clean up even
            # if the copy below raises.
            shm_blocks.append(segment)
            # Mirror the tile's bytes into the shared segment.
            np.ndarray(tile.shape, dtype=tile.dtype, buffer=segment.buf)[:] = tile[:]
            tile_refs[tile_name] = {
                'shm_name': segment.name,
                'shape': tile.shape,
                'dtype': str(tile.dtype),
            }
        except Exception as e:
            # Best-effort: the worker falls back to its pickled copy.
            log_fn(f"Failed to store tile {tile_name} in shm: {e}")
    return shm_blocks, tile_refs
def _pool_worker_shm_chunk(args):
    """Worker function that reads terrain from shared memory instead of pickle.

    Runs inside a ProcessPoolExecutor worker. *args* is a 5-tuple:
    (chunk, terrain_shm_refs, buildings, osm_data, config), where chunk is a
    list of (lat, lon, point_elev) triples and terrain_shm_refs maps
    tile_name -> {shm_name, shape, dtype} produced by _store_terrain_in_shm.
    Returns a list of serialized coverage-point dicts whose rsrp meets the
    configured minimum signal.
    """
    import multiprocessing.shared_memory as shm_mod
    chunk, terrain_shm_refs, buildings, osm_data, config = args
    # Reconstruct terrain cache from shared memory (zero-copy numpy views)
    terrain_cache = {}
    for tile_name, ref in terrain_shm_refs.items():
        try:
            # Attach to the block created by the parent process by name.
            block = shm_mod.SharedMemory(name=ref['shm_name'])
            # NOTE(review): only the numpy view is kept; the SharedMemory
            # handle `block` goes out of scope. The view's buffer reference
            # appears to keep the mapping alive — confirm on target Pythons.
            terrain_cache[tile_name] = np.ndarray(
                ref['shape'], dtype=ref['dtype'], buffer=block.buf,
            )
        except Exception:
            # Best-effort: a tile that fails to attach is simply absent
            # from the cache (terrain lookups for it will miss).
            pass
    # Inject terrain cache into the service's private tile cache so
    # elevation lookups hit the shared-memory views.
    from app.services.terrain_service import terrain_service
    terrain_service._tile_cache = terrain_cache
    # Build spatial index — cached per worker process via module globals so
    # repeated chunks for the same job skip the (expensive) rebuild.
    global _worker_spatial_idx, _worker_cache_key
    cache_key = config.get('cache_key', '')
    if _worker_cache_key != cache_key:
        if buildings:
            from app.services.spatial_index import SpatialIndex
            _worker_spatial_idx = SpatialIndex()
            _worker_spatial_idx.build(buildings)
        else:
            # No buildings: downstream code must handle a None index.
            _worker_spatial_idx = None
        _worker_cache_key = cache_key
    # Process points
    from app.services.coverage_service import CoverageService, SiteParams, CoverageSettings
    site = SiteParams(**config['site_dict'])
    settings = CoverageSettings(**config['settings_dict'])
    svc = CoverageService()
    # Per-stage timing accumulators passed into the point calculation.
    timing = {
        "los": 0.0, "buildings": 0.0, "antenna": 0.0,
        "dominant_path": 0.0, "street_canyon": 0.0,
        "reflection": 0.0, "vegetation": 0.0,
    }
    # Optional precomputed per-point distance/path-loss lookup keyed by (lat, lon).
    precomputed = config.get('precomputed')
    results = []
    for lat, lon, point_elev in chunk:
        pre = precomputed.get((lat, lon)) if precomputed else None
        point = svc._calculate_point_sync(
            site, lat, lon, settings,
            buildings, osm_data.get('streets', []),
            _worker_spatial_idx, osm_data.get('water_bodies', []),
            osm_data.get('vegetation_areas', []),
            config['site_elevation'], point_elev, timing,
            precomputed_distance=pre.get('distance') if pre else None,
            precomputed_path_loss=pre.get('path_loss') if pre else None,
        )
        # Filter out points below the minimum signal threshold before
        # pickling results back to the parent.
        if point.rsrp >= settings.min_signal:
            results.append(point.model_dump())
    return results
def _calculate_with_process_pool(
grid, point_elevations, site_dict, settings_dict,
terrain_cache, buildings, streets, water_bodies,
vegetation_areas, site_elevation,
num_workers, log_fn, cancel_token=None, precomputed=None,
progress_fn=None,
):
"""Execute using ProcessPoolExecutor with reduced workers to limit memory."""
"""Execute using ProcessPoolExecutor.
Uses shared memory for terrain tiles (zero-copy numpy views) to reduce
memory usage compared to pickling full terrain arrays per worker.
"""
from concurrent.futures import ProcessPoolExecutor, as_completed
total_points = len(grid)
log_fn(f"ProcessPool mode: {total_points} points, {num_workers} workers")
# Estimate pickle size for building data and cap workers accordingly
building_count = len(buildings)
if building_count > 10000:
num_workers = min(num_workers, 3)
log_fn(f"Large building set ({building_count}) — reducing workers to {num_workers}")
elif building_count > 5000:
num_workers = min(num_workers, 4)
log_fn(f"ProcessPool mode: {total_points} points, {num_workers} workers, "
f"{building_count} buildings")
# Store terrain tiles in shared memory
shm_blocks = []
terrain_shm_refs = {}
try:
shm_blocks, terrain_shm_refs = _store_terrain_in_shm(terrain_cache, log_fn)
if terrain_shm_refs:
tile_mb = sum(
np.prod(r['shape']) * np.dtype(r['dtype']).itemsize
for r in terrain_shm_refs.values()
) / (1024 * 1024)
log_fn(f"Stored {len(terrain_shm_refs)} terrain tiles in shared memory ({tile_mb:.0f} MB)")
use_shm = True
else:
use_shm = False
except Exception as e:
log_fn(f"Shared memory setup failed ({e}), using pickle fallback")
use_shm = False
items = [
(lat, lon, point_elevations.get((lat, lon), 0.0))
for lat, lon in grid
]
# Larger chunks than Ray — fewer workers means bigger chunks
chunk_size = max(1, len(items) // (num_workers * 2))
chunks = [items[i:i + chunk_size] for i in range(0, len(items), chunk_size)]
log_fn(f"Submitting {len(chunks)} chunks of ~{chunk_size} points")
cache_key = f"{site_dict['lat']:.4f},{site_dict['lon']:.4f},{len(buildings)}"
config = {
'site_dict': site_dict,
'settings_dict': settings_dict,
'site_elevation': site_elevation,
'cache_key': cache_key,
}
if precomputed:
config['precomputed'] = precomputed
@@ -505,20 +677,32 @@ def _calculate_with_process_pool(
pool = None
try:
# Use spawn context for clean worker processes
ctx = mp.get_context('spawn')
pool = ProcessPoolExecutor(max_workers=num_workers, mp_context=ctx)
futures = {
pool.submit(
_pool_worker_process_chunk,
(chunk, terrain_cache, buildings, osm_data, config),
): i
for i, chunk in enumerate(chunks)
}
_set_active_pool(pool)
if use_shm:
# Shared memory path: pass shm refs instead of terrain data
worker_fn = _pool_worker_shm_chunk
futures = {
pool.submit(
worker_fn,
(chunk, terrain_shm_refs, buildings, osm_data, config),
): i
for i, chunk in enumerate(chunks)
}
else:
# Pickle fallback path
futures = {
pool.submit(
_pool_worker_process_chunk,
(chunk, terrain_cache, buildings, osm_data, config),
): i
for i, chunk in enumerate(chunks)
}
completed_chunks = 0
for future in as_completed(futures):
# Check cancellation between chunks
if cancel_token and cancel_token.is_cancelled:
log_fn(f"Cancelled — cancelling {len(futures) - completed_chunks - 1} pending futures")
for f in futures:
@@ -539,20 +723,27 @@ def _calculate_with_process_pool(
eta = (total_points - pts) / rate if rate > 0 else 0
log_fn(f"Progress: {completed_chunks}/{len(chunks)} chunks ({pct}%) — "
f"{pts} pts, {rate:.0f} pts/s, ETA {eta:.0f}s")
if progress_fn:
progress_fn("Calculating coverage", 0.40 + 0.55 * (completed_chunks / len(chunks)))
except Exception as e:
log_fn(f"ProcessPool error: {e}")
finally:
# CRITICAL: Always cleanup pool and orphaned workers
_clear_active_pool()
if pool:
pool.shutdown(wait=False, cancel_futures=True)
# Give pool time to cleanup gracefully
time.sleep(0.5)
# Then force kill any survivors by process name
killed = _kill_worker_processes()
if killed > 0:
log_fn(f"Force killed {killed} orphaned workers")
# Cleanup shared memory blocks
for block in shm_blocks:
try:
block.close()
block.unlink()
except Exception:
pass
calc_time = time.time() - t_calc
log_fn(f"ProcessPool done: {calc_time:.1f}s, {len(all_results)} results "
@@ -561,7 +752,7 @@ def _calculate_with_process_pool(
timing = {
"parallel_total": calc_time,
"workers": num_workers,
"backend": "process_pool",
"backend": "process_pool" + ("/shm" if use_shm else "/pickle"),
}
return all_results, timing
@@ -573,6 +764,7 @@ def _calculate_sequential(
grid, point_elevations, site_dict, settings_dict,
buildings, streets, water_bodies, vegetation_areas,
site_elevation, log_fn, cancel_token=None, precomputed=None,
progress_fn=None,
):
"""Sequential fallback — no extra dependencies, runs in calling thread."""
from app.services.coverage_service import CoverageService, SiteParams, CoverageSettings
@@ -582,8 +774,9 @@ def _calculate_sequential(
settings = CoverageSettings(**settings_dict)
svc = CoverageService()
spatial_idx = SpatialIndex()
spatial_idx = None
if buildings:
spatial_idx = SpatialIndex()
spatial_idx.build(buildings)
total = len(grid)
@@ -604,6 +797,8 @@ def _calculate_sequential(
if i % log_interval == 0:
log_fn(f"Sequential: {i}/{total} ({i * 100 // total}%)")
if progress_fn:
progress_fn("Calculating coverage", 0.40 + 0.55 * (i / total))
point_elev = point_elevations.get((lat, lon), 0.0)

View File

@@ -1,3 +1,4 @@
import asyncio
import numpy as np
from typing import List, Tuple, Optional
from dataclasses import dataclass
@@ -24,7 +25,10 @@ class StreetCanyonService:
Loss increases at corners/turns.
"""
OVERPASS_URL = "https://overpass-api.de/api/interpreter"
OVERPASS_URLS = [
"https://overpass-api.de/api/interpreter",
"https://overpass.kumi.systems/api/interpreter",
]
# Default street widths by type
STREET_WIDTHS = {
@@ -88,14 +92,24 @@ class StreetCanyonService:
out skel qt;
"""
try:
async with httpx.AsyncClient(timeout=60.0) as client:
response = await client.post(self.OVERPASS_URL, data={"data": query})
response.raise_for_status()
data = response.json()
except Exception as e:
print(f"[Streets] Fetch error: {e}")
return []
data = None
max_retries = 3
for attempt in range(max_retries):
url = self.OVERPASS_URLS[attempt % len(self.OVERPASS_URLS)]
try:
timeout = 60.0 * (attempt + 1)
async with httpx.AsyncClient(timeout=timeout) as client:
response = await client.post(url, data={"data": query})
response.raise_for_status()
data = response.json()
break
except Exception as e:
print(f"[Streets] Overpass attempt {attempt + 1}/{max_retries} failed ({url}): {e}")
if attempt < max_retries - 1:
await asyncio.sleep(2 ** attempt)
else:
print(f"[Streets] All {max_retries} attempts failed")
return []
streets = self._parse_streets(data)

View File

@@ -6,6 +6,7 @@ Uses ITU-R P.833 approximations for foliage loss.
"""
import os
import asyncio
import httpx
import json
from typing import List, Tuple, Optional
@@ -81,7 +82,10 @@ class VegetationCache:
class VegetationService:
"""OSM vegetation for signal attenuation"""
OVERPASS_URL = "https://overpass-api.de/api/interpreter"
OVERPASS_URLS = [
"https://overpass-api.de/api/interpreter",
"https://overpass.kumi.systems/api/interpreter",
]
# Attenuation dB per 100 meters of vegetation
ATTENUATION_DB_PER_100M = {
@@ -127,7 +131,7 @@ class VegetationService:
self._memory_cache[cache_key] = areas
return areas
# Fetch from Overpass
# Fetch from Overpass with retry
print(f"[Vegetation] Fetching from Overpass API...")
query = f"""
@@ -143,14 +147,26 @@ class VegetationService:
out skel qt;
"""
try:
async with httpx.AsyncClient(timeout=60.0) as client:
response = await client.post(self.OVERPASS_URL, data={"data": query})
response.raise_for_status()
data = response.json()
except Exception as e:
print(f"[Vegetation] Fetch error: {e}")
return []
data = None
max_retries = 3
for attempt in range(max_retries):
url = self.OVERPASS_URLS[attempt % len(self.OVERPASS_URLS)]
try:
timeout = 60.0 * (attempt + 1) # 60s, 120s, 180s
async with httpx.AsyncClient(timeout=timeout) as client:
response = await client.post(url, data={"data": query})
response.raise_for_status()
data = response.json()
break
except Exception as e:
print(f"[Vegetation] Overpass attempt {attempt + 1}/{max_retries} failed ({url}): {e}")
if attempt < max_retries - 1:
wait_time = 2 ** attempt # 1s, 2s
print(f"[Vegetation] Retrying in {wait_time}s...")
await asyncio.sleep(wait_time)
else:
print(f"[Vegetation] All {max_retries} attempts failed")
return []
areas = self._parse_response(data)

View File

@@ -6,6 +6,7 @@ or create multipath interference for RF signals.
"""
import os
import asyncio
import httpx
import json
from typing import List, Tuple, Optional
@@ -81,7 +82,10 @@ class WaterCache:
class WaterService:
"""OSM water bodies for reflection calculations"""
OVERPASS_URL = "https://overpass-api.de/api/interpreter"
OVERPASS_URLS = [
"https://overpass-api.de/api/interpreter",
"https://overpass.kumi.systems/api/interpreter",
]
# Reflection coefficients by water type
REFLECTION_COEFF = {
@@ -132,14 +136,24 @@ class WaterService:
out skel qt;
"""
try:
async with httpx.AsyncClient(timeout=60.0) as client:
response = await client.post(self.OVERPASS_URL, data={"data": query})
response.raise_for_status()
data = response.json()
except Exception as e:
print(f"[Water] Fetch error: {e}")
return []
data = None
max_retries = 3
for attempt in range(max_retries):
url = self.OVERPASS_URLS[attempt % len(self.OVERPASS_URLS)]
try:
timeout = 60.0 * (attempt + 1)
async with httpx.AsyncClient(timeout=timeout) as client:
response = await client.post(url, data={"data": query})
response.raise_for_status()
data = response.json()
break
except Exception as e:
print(f"[Water] Overpass attempt {attempt + 1}/{max_retries} failed ({url}): {e}")
if attempt < max_retries - 1:
await asyncio.sleep(2 ** attempt)
else:
print(f"[Water] All {max_retries} attempts failed")
return []
bodies = self._parse_response(data)

View File

@@ -0,0 +1,3 @@
"""
Utility modules for RFCP backend.
"""

View File

@@ -0,0 +1,34 @@
"""
Structured logging for RFCP backend.
"""
import os
import sys
import time
import threading
_log_file = None  # Lazily-opened append handle to the shared backend log file
_log_file_lock = threading.Lock()  # Guards lazy open + write across threads


def rfcp_log(tag: str, msg: str):
    """Log *msg* with a tag prefix, wall-clock timestamp, and thread name.

    Always writes the line to stdout; additionally appends it to
    ``$RFCP_DATA_PATH/rfcp-backend.log`` (default ``./data``) on a
    best-effort basis — any file error is swallowed so logging can never
    break a calculation.
    """
    global _log_file
    ts = time.strftime('%H:%M:%S')
    thr = threading.current_thread().name
    line = f"[{tag} {ts}] [{thr}] {msg}"
    print(line, flush=True)
    try:
        # Lock prevents two threads from racing the lazy open (which would
        # leak a handle) and keeps concurrent writes from interleaving.
        with _log_file_lock:
            if _log_file is None:
                log_dir = os.environ.get('RFCP_DATA_PATH', './data')
                os.makedirs(log_dir, exist_ok=True)
                log_path = os.path.join(log_dir, 'rfcp-backend.log')
                # Explicit UTF-8: the platform default encoding can choke
                # on non-ASCII log text (e.g. cp1252 on Windows).
                _log_file = open(log_path, 'a', encoding='utf-8')
            _log_file.write(line + '\n')
            _log_file.flush()
    except Exception:
        # Best-effort file logging; the stdout line was already emitted.
        pass

View File

@@ -0,0 +1,44 @@
"""
Progress reporting for long-running calculations.
"""
import time
from typing import Optional, Callable, Awaitable
class ProgressTracker:
    """Track and report progress of a long-running calculation.

    Attributes:
        total: Expected number of work units.
        callback: Optional async callable receiving (phase, progress, eta_seconds).
        phase: Label forwarded unchanged to the callback.
        completed: Work units recorded so far.
        start_time: Wall-clock start, used for the ETA estimate.
    """

    def __init__(
        self,
        total: int,
        callback: Optional[Callable[[str, float, Optional[float]], Awaitable[None]]] = None,
        phase: str = "calculating",
    ):
        self.total = total
        self.callback = callback
        self.phase = phase
        self.completed = 0
        self.start_time = time.time()

    @property
    def progress(self) -> float:
        """Fraction complete in [0, 1]; an empty job counts as done."""
        if self.total == 0:
            return 1.0
        return self.completed / self.total

    @property
    def eta_seconds(self) -> Optional[float]:
        """Estimated seconds remaining, or None when not yet estimable."""
        if self.completed == 0:
            return None
        elapsed = time.time() - self.start_time
        # Guard: on coarse clocks `elapsed` can be exactly 0 immediately
        # after start, which previously raised ZeroDivisionError below.
        if elapsed <= 0:
            return None
        rate = self.completed / elapsed
        remaining = self.total - self.completed
        return remaining / rate if rate > 0 else None

    def update(self, n: int = 1):
        """Record *n* completed work units."""
        self.completed += n

    async def report(self):
        """Invoke the callback (if any) with current phase, progress, and ETA."""
        if self.callback:
            await self.callback(self.phase, self.progress, self.eta_seconds)

View File

@@ -0,0 +1,54 @@
"""
RF unit conversions.
"""
import math
def dbm_to_watts(dbm: float) -> float:
    """Convert a power level in dBm to watts."""
    exponent = (dbm - 30.0) / 10.0
    return 10.0 ** exponent
def watts_to_dbm(watts: float) -> float:
    """Convert watts to dBm; non-positive input maps to -inf."""
    if watts > 0:
        return 10 * math.log10(watts) + 30
    return -float('inf')
def dbm_to_mw(dbm: float) -> float:
    """Convert a power level in dBm to milliwatts."""
    return math.pow(10.0, dbm / 10.0)
def mw_to_dbm(mw: float) -> float:
    """Convert milliwatts to dBm; non-positive input maps to -inf."""
    if mw > 0:
        return 10 * math.log10(mw)
    return -float('inf')
def frequency_to_wavelength(frequency_mhz: float) -> float:
    """Wavelength in meters for a frequency in MHz (c approximated as 300 m·MHz)."""
    wavelength_m = 300.0 / frequency_mhz
    return wavelength_m
def wavelength_to_frequency(wavelength_m: float) -> float:
    """Frequency in MHz for a wavelength in meters (c approximated as 300 m·MHz)."""
    frequency_mhz = 300.0 / wavelength_m
    return frequency_mhz
def eirp_dbm(power_dbm: float, gain_dbi: float) -> float:
    """EIRP in dBm: transmit power plus antenna gain."""
    total = power_dbm + gain_dbi
    return total
def eirp_watts(power_dbm: float, gain_dbi: float) -> float:
    """EIRP expressed in watts."""
    # Equivalent to dbm_to_watts(power_dbm + gain_dbi), inlined.
    return 10 ** ((power_dbm + gain_dbi - 30) / 10)
def path_loss_to_signal_dbm(power_dbm: float, gain_dbi: float, path_loss_db: float) -> float:
    """Received signal level in dBm: EIRP minus path loss."""
    eirp = power_dbm + gain_dbi
    return eirp - path_loss_db