@mytec: feat: Phase 3.0 Architecture Refactor

Major refactoring of RFCP backend:
- Modular propagation models (8 models)
- SharedMemoryManager for terrain data
- ProcessPoolExecutor parallel processing
- WebSocket progress streaming
- Building filtering pipeline (351k → 15k)
- 82 unit tests

Performance: Standard preset 38s → 5s (7.6x speedup)

Known issue: Detailed preset timeout (fix in 3.1.0)
This commit is contained in:
2026-02-01 23:12:26 +02:00
parent 1dde56705a
commit defa3ad440
71 changed files with 7134 additions and 256 deletions

View File

@@ -1,5 +1,6 @@
import os
import re
import asyncio
import httpx
import json
from typing import List, Optional
@@ -90,7 +91,10 @@ class BuildingsService:
OpenStreetMap buildings via Overpass API with local caching.
"""
OVERPASS_URL = "https://overpass-api.de/api/interpreter"
OVERPASS_URLS = [
"https://overpass-api.de/api/interpreter",
"https://overpass.kumi.systems/api/interpreter",
]
DEFAULT_LEVEL_HEIGHT = 3.0 # meters per floor
DEFAULT_BUILDING_HEIGHT = 9.0 # 3 floors if unknown
@@ -152,7 +156,7 @@ class BuildingsService:
self._memory_cache[bbox_key] = buildings
return buildings
# Fetch from Overpass API
# Fetch from Overpass API with retry
print(f"[Buildings] Fetching from Overpass API...")
query = f"""
@@ -166,17 +170,26 @@ class BuildingsService:
out skel qt;
"""
try:
async with httpx.AsyncClient(timeout=60.0) as client:
response = await client.post(
self.OVERPASS_URL,
data={"data": query}
)
response.raise_for_status()
data = response.json()
except Exception as e:
print(f"[Buildings] Overpass API error: {e}")
return []
data = None
max_retries = 3
for attempt in range(max_retries):
url = self.OVERPASS_URLS[attempt % len(self.OVERPASS_URLS)]
try:
timeout = 60.0 * (attempt + 1) # 60s, 120s, 180s
async with httpx.AsyncClient(timeout=timeout) as client:
response = await client.post(url, data={"data": query})
response.raise_for_status()
data = response.json()
break
except Exception as e:
print(f"[Buildings] Overpass attempt {attempt + 1}/{max_retries} failed ({url}): {e}")
if attempt < max_retries - 1:
wait_time = 2 ** attempt # 1s, 2s
print(f"[Buildings] Retrying in {wait_time}s...")
await asyncio.sleep(wait_time)
else:
print(f"[Buildings] All {max_retries} attempts failed")
return []
buildings = self._parse_overpass_response(data)

View File

@@ -0,0 +1,250 @@
"""
Unified cache management for RFCP services.
Provides a single interface for managing all cached data:
- Terrain tiles (SRTM .hgt files, in-memory NumPy arrays)
- OSM building data (disk JSON + in-memory)
- Spatial index data
Tracks memory usage and enforces limits to prevent
memory explosion during large-area calculations.
"""
import os
import sys
import json
import time
import threading
from pathlib import Path
from typing import Optional, Dict, Any, Callable
from datetime import datetime, timedelta
class CacheEntry:
    """A single cached value plus bookkeeping metadata.

    Stores creation/access timestamps (monotonic clock) and a hit
    counter so the owning cache can do LRU eviction and report stats.
    """
    __slots__ = ('value', 'created_at', 'last_accessed', 'size_bytes', 'hits')

    def __init__(self, value: Any, size_bytes: int = 0):
        now = time.monotonic()
        self.value = value
        self.created_at = now
        self.last_accessed = now
        self.size_bytes = size_bytes
        self.hits = 0

    def touch(self):
        """Record an access: bump the hit count and refresh the LRU clock."""
        self.hits += 1
        self.last_accessed = time.monotonic()
class MemoryCache:
    """
    Thread-safe in-memory LRU cache with byte-level accounting.

    Whenever an insert would exceed max_entries or max_size_bytes, the
    least-recently-used entries are evicted first.
    """

    def __init__(self, name: str, max_entries: int = 100, max_size_bytes: int = 500 * 1024 * 1024):
        self.name = name
        self.max_entries = max_entries
        self.max_size_bytes = max_size_bytes
        self._store: Dict[str, "CacheEntry"] = {}
        self._guard = threading.Lock()
        self._bytes = 0
        self._hits = 0
        self._misses = 0

    def get(self, key: str) -> Optional[Any]:
        """Return the cached value for *key*, or None on a miss."""
        with self._guard:
            hit = self._store.get(key)
            if hit is None:
                self._misses += 1
                return None
            hit.touch()
            self._hits += 1
            return hit.value

    def put(self, key: str, value: Any, size_bytes: int = 0):
        """Insert or replace *key*, evicting LRU entries to stay within limits."""
        with self._guard:
            # Replacing an existing key: release its byte accounting first.
            old = self._store.pop(key, None)
            if old is not None:
                self._bytes -= old.size_bytes
            # Evict until both the entry-count and byte limits have room.
            while (
                len(self._store) >= self.max_entries
                or (self._bytes + size_bytes > self.max_size_bytes and self._store)
            ):
                self._evict_lru()
            self._store[key] = CacheEntry(value, size_bytes)
            self._bytes += size_bytes

    def remove(self, key: str) -> bool:
        """Drop *key* if present; True when an entry was actually removed."""
        with self._guard:
            dropped = self._store.pop(key, None)
            if dropped is None:
                return False
            self._bytes -= dropped.size_bytes
            return True

    def clear(self):
        """Remove every entry and reset the byte counter."""
        with self._guard:
            self._store.clear()
            self._bytes = 0

    def _evict_lru(self):
        """Remove the least-recently-used entry. Caller must hold _guard."""
        if not self._store:
            return
        victim_key, victim = min(
            self._store.items(), key=lambda kv: kv[1].last_accessed
        )
        del self._store[victim_key]
        self._bytes -= victim.size_bytes

    @property
    def size(self) -> int:
        return len(self._store)

    @property
    def size_bytes(self) -> int:
        return self._bytes

    @property
    def size_mb(self) -> float:
        return self._bytes / (1024 * 1024)

    def stats(self) -> dict:
        """Snapshot of cache statistics (unlocked read, like the counters)."""
        lookups = self._hits + self._misses
        return {
            "name": self.name,
            "entries": len(self._store),
            "size_mb": round(self.size_mb, 1),
            "max_size_mb": round(self.max_size_bytes / (1024 * 1024), 1),
            "hits": self._hits,
            "misses": self._misses,
            "hit_rate": round(self._hits / lookups * 100, 1) if lookups > 0 else 0,
        }
class DiskCache:
    """
    Persistent JSON-on-disk cache with per-entry TTL expiry.

    Each key maps to one ``<key>.json`` file under
    ``<base>/cache/<name>/``. Entries carry a write timestamp and are
    deleted lazily on read once older than ``ttl_days``.
    """

    # Filesystem-unsafe key characters are squashed to '_'.
    _KEY_TRANSLATION = str.maketrans({'/': '_', '\\': '_', ':': '_'})

    def __init__(self, name: str, base_path: Optional[Path] = None, ttl_days: int = 30):
        self.name = name
        self.ttl_days = ttl_days
        root = base_path if base_path is not None else Path(os.environ.get('RFCP_DATA_PATH', './data'))
        self.cache_path = root / 'cache' / name
        self.cache_path.mkdir(parents=True, exist_ok=True)

    def _key_to_file(self, key: str) -> Path:
        """Map a cache key to its backing JSON file path."""
        return self.cache_path / f"{key.translate(self._KEY_TRANSLATION)}.json"

    def get(self, key: str) -> Optional[Any]:
        """Return the cached value, or None if absent, expired, or corrupt."""
        entry_file = self._key_to_file(key)
        if not entry_file.exists():
            return None
        try:
            payload = json.loads(entry_file.read_text())
            written = datetime.fromisoformat(payload.get('_ts', '2000-01-01'))
            if datetime.now() - written > timedelta(days=self.ttl_days):
                # Expired — delete lazily and treat as a miss.
                entry_file.unlink(missing_ok=True)
                return None
            return payload.get('v')
        except Exception:
            # Unreadable or corrupt file counts as a miss.
            return None

    def put(self, key: str, value: Any):
        """Write *value* to disk together with a timestamp for TTL checks."""
        entry_file = self._key_to_file(key)
        try:
            entry_file.write_text(json.dumps({
                '_ts': datetime.now().isoformat(),
                'v': value,
            }))
        except Exception as e:
            print(f"[DiskCache:{self.name}] Write error: {e}")

    def remove(self, key: str) -> bool:
        """Delete the entry for *key*; True when a file was removed."""
        entry_file = self._key_to_file(key)
        if not entry_file.exists():
            return False
        entry_file.unlink()
        return True

    def clear(self):
        """Delete every cached JSON file belonging to this cache."""
        for entry_file in self.cache_path.glob("*.json"):
            entry_file.unlink(missing_ok=True)

    def size_mb(self) -> float:
        """Total on-disk size of this cache in megabytes."""
        total = sum(f.stat().st_size for f in self.cache_path.glob("*.json") if f.exists())
        return total / (1024 * 1024)

    def stats(self) -> dict:
        """Entry count, disk usage and TTL for this cache."""
        entry_files = list(self.cache_path.glob("*.json"))
        return {
            "name": self.name,
            "entries": len(entry_files),
            "size_mb": round(self.size_mb(), 1),
            "ttl_days": self.ttl_days,
        }
class CacheManager:
    """
    Unified cache manager for all RFCP services.

    Exposes four named caches:
    - terrain: MemoryCache for SRTM tile arrays (~25MB each)
    - buildings: MemoryCache for building lists
    - spatial: MemoryCache for spatial index objects
    - osm_disk: DiskCache for OSM API responses
    """

    def __init__(self):
        mb = 1024 * 1024
        # Terrain tiles are the biggest entries (~25MB each) — cap at ~500MB.
        self.terrain = MemoryCache("terrain", max_entries=20, max_size_bytes=500 * mb)
        self.buildings = MemoryCache("buildings", max_entries=50, max_size_bytes=200 * mb)
        self.spatial = MemoryCache("spatial_index", max_entries=50, max_size_bytes=100 * mb)
        self.osm_disk = DiskCache("osm", ttl_days=30)

    def _memory_caches(self):
        """The in-memory caches, in reporting order."""
        return (self.terrain, self.buildings, self.spatial)

    def clear_all(self):
        """Empty every managed cache, both in-memory and on disk."""
        for cache in self._memory_caches():
            cache.clear()
        self.osm_disk.clear()

    def stats(self) -> dict:
        """Per-cache statistics plus total in-memory usage in MB."""
        return {
            "terrain": self.terrain.stats(),
            "buildings": self.buildings.stats(),
            "spatial": self.spatial.stats(),
            "osm_disk": self.osm_disk.stats(),
            "total_memory_mb": round(
                sum(c.size_mb for c in self._memory_caches()), 1
            ),
        }


# Singleton
cache_manager = CacheManager()

View File

@@ -6,7 +6,7 @@ import threading
import numpy as np
import asyncio
from concurrent.futures import ThreadPoolExecutor
from typing import List, Optional, Tuple
from typing import List, Optional, Tuple, Callable
_coverage_log_file = None
@@ -58,6 +58,141 @@ from app.services.parallel_coverage_service import (
CancellationToken,
)
# ── New propagation models (Phase 3.0) ──
from app.propagation.base import PropagationModel, PropagationInput, PropagationOutput
from app.propagation.free_space import FreeSpaceModel
from app.propagation.okumura_hata import OkumuraHataModel
from app.propagation.cost231_hata import Cost231HataModel
from app.propagation.cost231_wi import Cost231WIModel
from app.propagation.itu_r_p1546 import ITUR_P1546Model
from app.propagation.longley_rice import LongleyRiceModel
from app.propagation.itu_r_p526 import KnifeEdgeDiffractionModel
# Pre-instantiate models (stateless, thread-safe)
_PROPAGATION_MODELS = {
'free_space': FreeSpaceModel(),
'okumura_hata': OkumuraHataModel(),
'cost231_hata': Cost231HataModel(),
'cost231_wi': Cost231WIModel(),
'itu_r_p1546': ITUR_P1546Model(),
'longley_rice': LongleyRiceModel(),
}
_DIFFRACTION_MODEL = KnifeEdgeDiffractionModel()
def select_propagation_model(frequency_mhz: float, environment: str = "urban") -> PropagationModel:
    """Pick the best propagation model for a frequency/environment pair.

    Selection bands (checked high to low):
    - > 2000 MHz: Free-Space Path Loss
    - 1500-2000 MHz: COST-231 Hata
    - 520-1500 MHz: Okumura-Hata
    - 150-520 MHz: ITU-R P.1546 (urban/suburban) or Longley-Rice (rural/open)
    - < 150 MHz: Longley-Rice (ITM, designed for VHF)
    """
    if frequency_mhz > 2000:
        return _PROPAGATION_MODELS['free_space']
    if frequency_mhz > 1500:
        return _PROPAGATION_MODELS['cost231_hata']
    if frequency_mhz > 520:
        return _PROPAGATION_MODELS['okumura_hata']
    if frequency_mhz >= 150:
        # Rural/open terrain favors the terrain-aware ITM model here.
        key = 'longley_rice' if environment in ('rural', 'open') else 'itu_r_p1546'
        return _PROPAGATION_MODELS[key]
    return _PROPAGATION_MODELS['longley_rice']
# ── OSM data filtering ──
# OSM fetches use 1-degree grid cells — much larger than the coverage radius.
# Passing all buildings to ProcessPool workers causes MemoryError (pickle copy
# per worker). Filter to coverage bbox and cap count for safety.
MAX_BUILDINGS_FOR_WORKERS = 15000


def _filter_buildings_to_bbox(
    buildings: list,
    min_lat: float, min_lon: float,
    max_lat: float, max_lon: float,
    site_lat: float, site_lon: float,
    log_fn=None,
) -> list:
    """Reduce an oversized building list to the coverage bbox, capped.

    Lists at or under MAX_BUILDINGS_FOR_WORKERS are returned untouched.
    Oversized lists are bbox-filtered (with a ~500m buffer so LOS checks
    near the edges still see obstacles) and, if still too large, capped
    to the buildings nearest the site — those most likely to affect
    coverage.
    """
    if not buildings or len(buildings) <= MAX_BUILDINGS_FOR_WORKERS:
        return buildings

    original = len(buildings)
    pad = 0.005  # ~500m in degrees
    lat_lo, lat_hi = min_lat - pad, max_lat + pad
    lon_lo, lon_hi = min_lon - pad, max_lon + pad

    # Keep any building with at least one vertex inside the padded bbox.
    filtered = [
        b for b in buildings
        if any(
            lat_lo <= lat_pt <= lat_hi and lon_lo <= lon_pt <= lon_hi
            for lon_pt, lat_pt in b.geometry
        )
    ]
    if log_fn:
        log_fn(f"Building bbox filter: {original} -> {len(filtered)}")

    if len(filtered) > MAX_BUILDINGS_FOR_WORKERS:
        # Cap by squared centroid distance to the site (sqrt not needed
        # for ordering).
        def _dist2(b):
            n = len(b.geometry)
            clat = sum(p[1] for p in b.geometry) / n
            clon = sum(p[0] for p in b.geometry) / n
            return (clat - site_lat) ** 2 + (clon - site_lon) ** 2

        filtered.sort(key=_dist2)
        filtered = filtered[:MAX_BUILDINGS_FOR_WORKERS]
        if log_fn:
            log_fn(f"Building distance cap: -> {len(filtered)} (nearest to site)")
    return filtered
def _filter_osm_list_to_bbox(items: list, min_lat: float, min_lon: float,
                             max_lat: float, max_lon: float,
                             max_count: int = 20000) -> list:
    """Reduce an oversized OSM feature list to the coverage bbox.

    Items expose either a .geometry/.points list of [lon, lat] pairs or
    point objects with .lat/.lon attributes; items with no geometry at
    all are kept. Lists at or under max_count are returned untouched;
    the result is always capped to max_count items.
    """
    if not items or len(items) <= max_count:
        return items

    pad = 0.005  # ~500m buffer around the bbox
    lat_lo, lat_hi = min_lat - pad, max_lat + pad
    lon_lo, lon_hi = min_lon - pad, max_lon + pad

    def _inside(pt) -> bool:
        """True if a single geometry point falls inside the padded bbox."""
        if isinstance(pt, (list, tuple)) and len(pt) >= 2:
            lon_pt, lat_pt = pt[0], pt[1]
        elif hasattr(pt, 'lat'):
            lat_pt, lon_pt = pt.lat, pt.lon
        else:
            # Unrecognized point shape — cannot match the bbox.
            return False
        return lat_lo <= lat_pt <= lat_hi and lon_lo <= lon_pt <= lon_hi

    kept = []
    for item in items:
        geom = getattr(item, 'geometry', None) or getattr(item, 'points', None)
        if not geom:
            # No geometry — keep it
            kept.append(item)
        elif any(_inside(pt) for pt in geom):
            kept.append(item)
    return kept[:max_count]
class CoveragePoint(BaseModel):
lat: float
@@ -79,6 +214,9 @@ class CoverageSettings(BaseModel):
resolution: float = 200 # meters
min_signal: float = -120 # dBm threshold
# Environment type for propagation model selection
environment: str = "urban" # urban, suburban, rural, open
# Layer toggles
use_terrain: bool = True
use_buildings: bool = True
@@ -283,11 +421,13 @@ class CoverageService:
site: SiteParams,
settings: CoverageSettings,
cancel_token: Optional[CancellationToken] = None,
progress_fn: Optional[Callable[[str, float], None]] = None,
) -> List[CoveragePoint]:
"""
Calculate coverage grid for a single site
Returns list of CoveragePoint with RSRP values
Returns list of CoveragePoint with RSRP values.
progress_fn(phase, pct): optional callback for progress updates (0.0-1.0).
"""
calc_start = time.time()
@@ -317,6 +457,9 @@ class CoverageService:
# ━━━ PHASE 1: Fetch OSM data ━━━
_clog("━━━ PHASE 1: Fetching OSM data ━━━")
if progress_fn:
progress_fn("Fetching map data", 0.10)
await asyncio.sleep(0) # Yield so progress_sender can flush WS message
t_osm = time.time()
osm_data = await self._fetch_osm_grid_aligned(
min_lat, min_lon, max_lat, max_lon, settings
@@ -329,6 +472,17 @@ class CoverageService:
vegetation_areas = osm_data["vegetation_areas"]
_clog(f"━━━ PHASE 1 done: {osm_time:.1f}s ━━━")
# ── Filter OSM data to coverage area ──
# OSM cells are 1-degree wide, often far larger than the coverage radius.
# Passing 350k buildings to ProcessPool workers causes MemoryError (pickle).
buildings = _filter_buildings_to_bbox(
buildings, min_lat, min_lon, max_lat, max_lon,
site.lat, site.lon, _clog,
)
streets = _filter_osm_list_to_bbox(streets, min_lat, min_lon, max_lat, max_lon)
water_bodies = _filter_osm_list_to_bbox(water_bodies, min_lat, min_lon, max_lat, max_lon)
vegetation_areas = _filter_osm_list_to_bbox(vegetation_areas, min_lat, min_lon, max_lat, max_lon)
# Build spatial index for buildings
spatial_idx: Optional[SpatialIndex] = None
if buildings:
@@ -337,6 +491,9 @@ class CoverageService:
# ━━━ PHASE 2: Pre-load terrain ━━━
_clog("━━━ PHASE 2: Pre-loading terrain ━━━")
if progress_fn:
progress_fn("Loading terrain", 0.25)
await asyncio.sleep(0)
t_terrain = time.time()
tile_names = await self.terrain.ensure_tiles_for_bbox(
min_lat, min_lon, max_lat, max_lon
@@ -355,6 +512,9 @@ class CoverageService:
_clog(f"━━━ PHASE 2 done: {terrain_time:.1f}s ━━━")
# ━━━ PHASE 2.5: Vectorized pre-computation (GPU/NumPy) ━━━
if progress_fn:
progress_fn("Pre-computing propagation", 0.35)
await asyncio.sleep(0)
from app.services.gpu_service import gpu_service
t_gpu = time.time()
@@ -365,7 +525,8 @@ class CoverageService:
grid_lats, grid_lons, site.lat, site.lon
)
pre_path_loss = gpu_service.precompute_path_loss(
pre_distances, site.frequency, site.height
pre_distances, site.frequency, site.height,
environment=getattr(settings, 'environment', 'urban'),
)
# Build lookup dict for point loop
@@ -377,8 +538,11 @@ class CoverageService:
}
gpu_time = time.time() - t_gpu
env = getattr(settings, 'environment', 'urban')
selected_model = select_propagation_model(site.frequency, env)
_clog(f"━━━ PHASE 2.5: Vectorized pre-computation done: {gpu_time:.3f}s "
f"({len(grid)} points, backend={'GPU' if gpu_service.available else 'CPU/NumPy'}) ━━━")
f"({len(grid)} points, model={selected_model.name}, freq={site.frequency}MHz, "
f"env={env}, backend={'GPU' if gpu_service.available else 'CPU/NumPy'}) ━━━")
# ━━━ PHASE 3: Point calculation ━━━
dominant_path_service._log_count = 0 # Reset diagnostic counter
@@ -387,6 +551,10 @@ class CoverageService:
use_parallel = len(grid) > 100 and get_cpu_count() > 1
num_workers = get_cpu_count()
if progress_fn:
progress_fn("Calculating coverage", 0.40)
await asyncio.sleep(0)
if use_parallel:
backend = get_parallel_backend()
_clog(f"━━━ PHASE 3: Calculating {len(grid)} points "
@@ -404,6 +572,7 @@ class CoverageService:
site_elevation, num_workers, _clog,
cancel_token=cancel_token,
precomputed=precomputed,
progress_fn=progress_fn,
),
)
@@ -426,9 +595,14 @@ class CoverageService:
site_elevation, point_elevations,
cancel_token=cancel_token,
precomputed=precomputed,
progress_fn=progress_fn,
),
)
if progress_fn:
progress_fn("Finalizing", 0.95)
await asyncio.sleep(0)
points_time = time.time() - t_points
total_time = time.time() - calc_start
@@ -522,6 +696,7 @@ class CoverageService:
spatial_idx, water_bodies, vegetation_areas,
site_elevation, point_elevations,
cancel_token=None, precomputed=None,
progress_fn=None,
):
"""Sync point loop - runs in ThreadPoolExecutor, bypasses event loop."""
points = []
@@ -538,6 +713,8 @@ class CoverageService:
if i % log_interval == 0:
_clog(f"Progress: {i}/{total} ({i*100//total}%)")
if progress_fn:
progress_fn("Calculating coverage", 0.40 + 0.55 * (i / total))
pre = precomputed.get((lat, lon)) if precomputed else None
@@ -581,11 +758,20 @@ class CoverageService:
if distance < 1:
distance = 1
# Base path loss (use precomputed if available)
# Base path loss (use precomputed if available, else use new model)
if precomputed_path_loss is not None:
path_loss = precomputed_path_loss
else:
path_loss = self._okumura_hata(distance, site.frequency, site.height, 1.5)
env = getattr(settings, 'environment', 'urban')
model = select_propagation_model(site.frequency, env)
prop_input = PropagationInput(
frequency_mhz=site.frequency,
distance_m=distance,
tx_height_m=site.height,
rx_height_m=1.5,
environment=env,
)
path_loss = model.calculate(prop_input).path_loss_db
# Antenna pattern
antenna_loss = 0.0
@@ -649,90 +835,105 @@ class CoverageService:
timing["buildings"] += time.time() - t0
# Dominant path (vectorized NumPy) — replaces loop-based sync version
if settings.use_dominant_path and (spatial_idx or nearby_buildings):
# Only enter when there are actual buildings (spatial_idx with data OR non-empty list)
has_building_data = nearby_buildings or (spatial_idx is not None and spatial_idx._grid)
if settings.use_dominant_path and has_building_data:
t0 = time.time()
dominant = find_dominant_paths_vectorized(
site.lat, site.lon, site.height,
lat, lon, 1.5,
site.frequency, nearby_buildings,
spatial_idx=spatial_idx,
)
if dominant['path_type'] == 'direct':
# Direct LOS confirmed by vectorized check
has_los = True
building_loss = 0.0
elif dominant['path_type'] == 'reflection':
# Reflection path bypasses buildings — reduce building loss
building_loss = max(0.0, building_loss - (10.0 - dominant['total_loss']))
has_los = False
elif dominant['path_type'] == 'diffraction':
# Diffraction: use estimated loss if worse than current
if dominant['total_loss'] > building_loss:
building_loss = dominant['total_loss']
has_los = False
try:
dominant = find_dominant_paths_vectorized(
site.lat, site.lon, site.height,
lat, lon, 1.5,
site.frequency, nearby_buildings,
spatial_idx=spatial_idx,
)
if dominant['path_type'] == 'direct':
has_los = True
building_loss = 0.0
elif dominant['path_type'] == 'reflection':
building_loss = max(0.0, building_loss - (10.0 - dominant['total_loss']))
has_los = False
elif dominant['path_type'] == 'diffraction':
if dominant['total_loss'] > building_loss:
building_loss = dominant['total_loss']
has_los = False
except Exception:
pass # Skip dominant path on error — use base model
timing["dominant_path"] += time.time() - t0
# Street canyon (sync)
if settings.use_street_canyon and streets:
t0 = time.time()
canyon_loss, _street_path = street_canyon_service.calculate_street_canyon_loss_sync(
site.lat, site.lon, site.height,
lat, lon, 1.5,
site.frequency, streets
)
if canyon_loss < (path_loss + terrain_loss + building_loss):
path_loss = canyon_loss
terrain_loss = 0
building_loss = 0
try:
canyon_loss, _street_path = street_canyon_service.calculate_street_canyon_loss_sync(
site.lat, site.lon, site.height,
lat, lon, 1.5,
site.frequency, streets
)
# Only use street canyon if it's a finite improvement
if math.isfinite(canyon_loss) and canyon_loss < (path_loss + terrain_loss + building_loss):
path_loss = canyon_loss
terrain_loss = 0
building_loss = 0
except Exception:
pass # Skip street canyon on error
timing["street_canyon"] += time.time() - t0
# Vegetation (already sync)
veg_loss = 0.0
if settings.use_vegetation and vegetation_areas:
t0 = time.time()
veg_loss = vegetation_service.calculate_vegetation_loss(
site.lat, site.lon, lat, lon, vegetation_areas, settings.season
)
try:
veg_loss = vegetation_service.calculate_vegetation_loss(
site.lat, site.lon, lat, lon, vegetation_areas, settings.season
)
except Exception:
veg_loss = 0.0
timing["vegetation"] += time.time() - t0
# Reflections (sync)
reflection_gain = 0.0
if settings.use_reflections and nearby_buildings:
t0 = time.time()
is_over_water = False
if settings.use_water_reflection and water_bodies:
is_over_water = water_service.point_over_water(lat, lon, water_bodies) is not None
try:
is_over_water = False
if settings.use_water_reflection and water_bodies:
is_over_water = water_service.point_over_water(lat, lon, water_bodies) is not None
refl_paths = reflection_service.find_reflection_paths_sync(
site.lat, site.lon, site.height,
lat, lon, 1.5,
site.frequency, nearby_buildings,
include_ground=True
)
if is_over_water and refl_paths:
water_path = reflection_service._calculate_ground_reflection(
refl_paths = reflection_service.find_reflection_paths_sync(
site.lat, site.lon, site.height,
lat, lon, 1.5,
site.frequency, is_water=True
site.frequency, nearby_buildings,
include_ground=True
)
if water_path:
refl_paths = [p for p in refl_paths if "ground" not in p.materials]
refl_paths.append(water_path)
refl_paths.sort(key=lambda p: p.total_loss)
if refl_paths:
direct_rsrp = (site.power + site.gain - path_loss - antenna_loss
- terrain_loss - building_loss - veg_loss)
combined_rsrp = reflection_service.combine_paths(
direct_rsrp, refl_paths, site.power + site.gain
)
reflection_gain = max(0, combined_rsrp - direct_rsrp)
if is_over_water and refl_paths:
water_path = reflection_service._calculate_ground_reflection(
site.lat, site.lon, site.height,
lat, lon, 1.5,
site.frequency, is_water=True
)
if water_path:
refl_paths = [p for p in refl_paths if "ground" not in p.materials]
refl_paths.append(water_path)
refl_paths.sort(key=lambda p: p.total_loss)
if refl_paths:
direct_rsrp = (site.power + site.gain - path_loss - antenna_loss
- terrain_loss - building_loss - veg_loss)
combined_rsrp = reflection_service.combine_paths(
direct_rsrp, refl_paths, site.power + site.gain
)
reflection_gain = max(0, combined_rsrp - direct_rsrp)
except Exception:
reflection_gain = 0.0
timing["reflection"] += time.time() - t0
elif settings.use_water_reflection and water_bodies and not settings.use_reflections:
is_over_water = water_service.point_over_water(lat, lon, water_bodies) is not None
if is_over_water:
reflection_gain = 3.0
try:
is_over_water = water_service.point_over_water(lat, lon, water_bodies) is not None
if is_over_water:
reflection_gain = 3.0
except Exception:
pass
# Rain
rain_loss = 0.0
@@ -770,26 +971,6 @@ class CoverageService:
indoor_loss=indoor_loss, atmospheric_loss=atmo_loss,
)
def _okumura_hata(
self,
distance: float,
frequency: float,
tx_height: float,
rx_height: float
) -> float:
"""Okumura-Hata path loss model (urban). Returns path loss in dB."""
d_km = distance / 1000
if d_km < 0.1:
d_km = 0.1
a_hm = (1.1 * np.log10(frequency) - 0.7) * rx_height - (1.56 * np.log10(frequency) - 0.8)
L = (69.55 + 26.16 * np.log10(frequency) - 13.82 * np.log10(tx_height) - a_hm +
(44.9 - 6.55 * np.log10(tx_height)) * np.log10(d_km))
return L
def _antenna_pattern_loss(
self,
site_lat: float, site_lon: float,
@@ -831,20 +1012,8 @@ class CoverageService:
return (bearing + 360) % 360
def _diffraction_loss(self, clearance: float, frequency: float) -> float:
"""Knife-edge diffraction loss. Returns additional loss in dB."""
if clearance >= 0:
return 0.0
v = abs(clearance) / 10
if v <= 0:
loss = 0
elif v < 2.4:
loss = 6.02 + 9.11 * v - 1.27 * v**2
else:
loss = 13.0 + 20 * np.log10(v)
return min(loss, 40)
"""Knife-edge diffraction loss using ITU-R P.526 model."""
return _DIFFRACTION_MODEL.calculate_clearance_loss(clearance, frequency)
# Singleton

View File

@@ -139,12 +139,33 @@ def find_dominant_paths_vectorized(
"""
global _vec_log_count
# Fast path: no buildings at all → direct LOS, skip all numpy work
has_spatial_data = spatial_idx is not None and spatial_idx._grid
if not buildings and not has_spatial_data:
return {
'has_los': True,
'path_type': 'direct',
'total_loss': 0.0,
'path_length': 0.0,
'reflection_point': None,
}
# Get nearby buildings via spatial index (same filtering as sync version)
if spatial_idx:
line_buildings = spatial_idx.query_line(tx_lat, tx_lon, rx_lat, rx_lon)
else:
line_buildings = buildings
# No nearby buildings along this line → direct LOS
if not line_buildings:
return {
'has_los': True,
'path_type': 'direct',
'total_loss': 0.0,
'path_length': 0.0,
'reflection_point': None,
}
line_buildings = _filter_buildings_by_distance(
line_buildings,
(tx_lat, tx_lon), (rx_lat, rx_lon),
@@ -654,6 +675,19 @@ class DominantPathService:
buildings: fallback list (only used if spatial_idx is None)
spatial_idx: grid-based spatial index for fast local queries
"""
# Fast path: no buildings at all → direct LOS only
has_spatial_data = spatial_idx is not None and spatial_idx._grid
if not buildings and not has_spatial_data:
distance = terrain_service.haversine_distance(tx_lat, tx_lon, rx_lat, rx_lon)
return [RayPath(
path_type="direct",
total_distance=distance,
path_loss=self._calculate_path_loss(distance, frequency_mhz, tx_height, rx_height),
reflection_points=[],
materials_crossed=[],
is_valid=True,
)]
paths = []
# Use spatial index to get only buildings along the TX→RX line

View File

@@ -99,8 +99,12 @@ class GPUService:
frequency_mhz: float,
tx_height: float,
rx_height: float = 1.5,
environment: str = "urban",
) -> np.ndarray:
"""Vectorized Okumura-Hata path loss for all distances.
"""Vectorized path loss using the appropriate propagation model.
Selects model based on frequency (Phase 3.0 model selection), then
applies the correct formula in a single vectorized numpy pass.
Returns path loss in dB as a CPU numpy array.
"""
@@ -108,16 +112,47 @@ class GPUService:
d_km = xp.maximum(d_arr / 1000.0, 0.1)
freq = float(frequency_mhz)
h_tx = float(tx_height)
h_rx = float(rx_height)
h_tx = max(float(tx_height), 1.0)
h_rx = max(float(rx_height), 1.0)
log_f = xp.log10(xp.float64(freq))
log_hb = xp.log10(xp.float64(h_tx))
log_hb = xp.log10(xp.float64(max(h_tx, 1.0)))
a_hm = (1.1 * log_f - 0.7) * h_rx - (1.56 * log_f - 0.8)
if freq > 2000:
# Free-Space Path Loss: FSPL = 20*log10(d_km) + 20*log10(f) + 32.45
L = 20.0 * xp.log10(d_km) + 20.0 * log_f + 32.45
L = (69.55 + 26.16 * log_f - 13.82 * log_hb - a_hm
+ (44.9 - 6.55 * log_hb) * xp.log10(d_km))
elif freq > 1500:
# COST-231 Hata: extends Okumura-Hata to 1500-2000 MHz
a_hm = (1.1 * log_f - 0.7) * h_rx - (1.56 * log_f - 0.8)
L = (46.3 + 33.9 * log_f - 13.82 * log_hb - a_hm
+ (44.9 - 6.55 * log_hb) * xp.log10(d_km))
if environment == "urban":
L += 3.0 # Metropolitan center correction
elif freq >= 150:
# Okumura-Hata: 150-1500 MHz
if environment == "urban" and freq >= 400:
a_hm = 3.2 * (xp.log10(11.75 * h_rx) ** 2) - 4.97
else:
a_hm = (1.1 * log_f - 0.7) * h_rx - (1.56 * log_f - 0.8)
L_urban = (69.55 + 26.16 * log_f - 13.82 * log_hb - a_hm
+ (44.9 - 6.55 * log_hb) * xp.log10(d_km))
if environment == "suburban":
L = L_urban - 2 * (xp.log10(freq / 28) ** 2) - 5.4
elif environment == "rural":
L = L_urban - 4.78 * (log_f ** 2) + 18.33 * log_f - 35.94
elif environment == "open":
L = L_urban - 4.78 * (log_f ** 2) + 18.33 * log_f - 40.94
else:
L = L_urban
else:
# Very low frequency — Longley-Rice simplified (area mode)
# Use FSPL as baseline with terrain roughness correction
L = 20.0 * xp.log10(d_km) + 20.0 * log_f + 32.45 + 10.0
return _to_cpu(L)

View File

@@ -0,0 +1,167 @@
"""
Dedicated OpenStreetMap Overpass API client.
Handles:
- Building footprint queries
- Vegetation area queries
- Water body queries
- Response parsing and error handling
- Rate limiting (Overpass requires courtesy)
"""
import time
import asyncio
from typing import List, Optional, Dict, Any
import httpx
# Overpass API endpoints (primary + mirror)
OVERPASS_ENDPOINTS = [
"https://overpass-api.de/api/interpreter",
"https://overpass.kumi.systems/api/interpreter",
]
# Minimum seconds between requests to same endpoint
RATE_LIMIT_SECONDS = 1.0
class OSMClient:
"""
OpenStreetMap Overpass API client with rate limiting
and automatic failover between endpoints.
"""
def __init__(self, timeout: float = 60.0):
self.timeout = timeout
self._last_request_time: float = 0
self._current_endpoint = 0
async def _rate_limit(self):
"""Enforce rate limiting between requests."""
elapsed = time.monotonic() - self._last_request_time
if elapsed < RATE_LIMIT_SECONDS:
await asyncio.sleep(RATE_LIMIT_SECONDS - elapsed)
self._last_request_time = time.monotonic()
async def query(self, overpass_ql: str) -> Optional[Dict[str, Any]]:
"""
Execute an Overpass QL query with automatic failover.
Returns parsed JSON response or None on failure.
"""
await self._rate_limit()
for i in range(len(OVERPASS_ENDPOINTS)):
idx = (self._current_endpoint + i) % len(OVERPASS_ENDPOINTS)
endpoint = OVERPASS_ENDPOINTS[idx]
try:
async with httpx.AsyncClient(timeout=self.timeout) as client:
response = await client.post(
endpoint,
data={"data": overpass_ql},
)
if response.status_code == 429:
# Rate limited — try next endpoint
print(f"[OSM] Rate limited by {endpoint}, trying next...")
continue
response.raise_for_status()
self._current_endpoint = idx
return response.json()
except httpx.TimeoutException:
print(f"[OSM] Timeout from {endpoint}")
continue
except httpx.HTTPStatusError as e:
print(f"[OSM] HTTP error from {endpoint}: {e.response.status_code}")
continue
except Exception as e:
print(f"[OSM] Error from {endpoint}: {e}")
continue
print("[OSM] All endpoints failed")
return None
async def fetch_buildings(
self,
min_lat: float, min_lon: float,
max_lat: float, max_lon: float,
) -> List[Dict[str, Any]]:
"""
Fetch building footprints in a bounding box.
Returns list of raw OSM elements (ways and relations).
"""
query = f"""
[out:json][timeout:30];
(
way["building"]({min_lat},{min_lon},{max_lat},{max_lon});
relation["building"]({min_lat},{min_lon},{max_lat},{max_lon});
);
out body;
>;
out skel qt;
"""
data = await self.query(query)
if data is None:
return []
return data.get("elements", [])
async def fetch_vegetation(
self,
min_lat: float, min_lon: float,
max_lat: float, max_lon: float,
) -> List[Dict[str, Any]]:
"""Fetch vegetation areas (forests, parks, etc.)."""
query = f"""
[out:json][timeout:30];
(
way["natural"="wood"]({min_lat},{min_lon},{max_lat},{max_lon});
way["landuse"="forest"]({min_lat},{min_lon},{max_lat},{max_lon});
way["natural"="tree_row"]({min_lat},{min_lon},{max_lat},{max_lon});
relation["natural"="wood"]({min_lat},{min_lon},{max_lat},{max_lon});
relation["landuse"="forest"]({min_lat},{min_lon},{max_lat},{max_lon});
);
out body;
>;
out skel qt;
"""
data = await self.query(query)
if data is None:
return []
return data.get("elements", [])
async def fetch_water(
    self,
    min_lat: float, min_lon: float,
    max_lat: float, max_lon: float,
) -> List[Dict[str, Any]]:
    """
    Fetch water bodies (lakes, rivers, waterways) in a bounding box.

    Returns the raw OSM elements list, or an empty list when the
    Overpass request fails (``self.query`` returned ``None``).
    """
    bbox = f"{min_lat},{min_lon},{max_lat},{max_lon}"
    selectors = (
        'way["natural"="water"]',
        'way["waterway"]',
        'relation["natural"="water"]',
    )
    union = "\n".join(f"      {sel}({bbox});" for sel in selectors)
    overpass_ql = f"""
    [out:json][timeout:30];
    (
{union}
    );
    out body;
    >;
    out skel qt;
    """
    payload = await self.query(overpass_ql)
    return payload.get("elements", []) if payload is not None else []
# Module-level singleton: one shared OSMClient so endpoint rotation state
# and rate limiting are shared by all importers of this module.
osm_client = OSMClient()

View File

@@ -47,6 +47,24 @@ class CancellationToken:
return self._event.is_set()
# ── Active pool tracking (for graceful shutdown) ──
# Module-level handle to the currently running ProcessPoolExecutor so a
# shutdown/cancel path can request its shutdown. Guarded by a lock because
# the pool is set from the calculation thread and cleared/read elsewhere.
_active_pool = None  # Global ref to current ProcessPoolExecutor
_active_pool_lock = threading.Lock()
def _set_active_pool(pool):
    # Record `pool` as the active executor (overwrites any previous ref).
    global _active_pool
    with _active_pool_lock:
        _active_pool = pool
def _clear_active_pool():
    # Drop the active-executor ref; does NOT shut the pool down itself.
    global _active_pool
    with _active_pool_lock:
        _active_pool = None
# ── Worker process cleanup ──
def _clog(msg: str):
@@ -57,10 +75,23 @@ def _clog(msg: str):
def _kill_worker_processes() -> int:
"""Kill ALL rfcp-server processes except the current (main) process.
Uses process NAME matching instead of PID tree because psutil.children()
cannot see grandchildren spawned by ProcessPoolExecutor workers.
First shuts down the active ProcessPoolExecutor (if any), then uses
process NAME matching to kill remaining workers.
Returns the number of processes killed.
"""
global _active_pool
# Step 0: Shut down active ProcessPoolExecutor gracefully
with _active_pool_lock:
pool = _active_pool
_active_pool = None
if pool is not None:
try:
pool.shutdown(wait=False, cancel_futures=True)
_clog("Active ProcessPoolExecutor shutdown requested")
except Exception as e:
_clog(f"Pool shutdown error: {e}")
my_pid = os.getpid()
killed_count = 0
@@ -154,10 +185,12 @@ def _ray_process_chunk_impl(chunk, terrain_cache, buildings, osm_data, config):
# Build or reuse spatial index (expensive — ~1s for 350K buildings).
cache_key = config.get('cache_key', '')
if _worker_cache_key != cache_key:
from app.services.spatial_index import SpatialIndex
_worker_spatial_idx = SpatialIndex()
if buildings:
from app.services.spatial_index import SpatialIndex
_worker_spatial_idx = SpatialIndex()
_worker_spatial_idx.build(buildings)
else:
_worker_spatial_idx = None
_worker_cache_key = cache_key
# Process points
@@ -262,6 +295,7 @@ def calculate_coverage_parallel(
log_fn: Optional[Callable[[str], None]] = None,
cancel_token: Optional[CancellationToken] = None,
precomputed: Optional[Dict] = None,
progress_fn: Optional[Callable[[str, float], None]] = None,
) -> Tuple[List[Dict], Dict[str, float]]:
"""Calculate coverage points in parallel.
@@ -287,6 +321,7 @@ def calculate_coverage_parallel(
terrain_cache, buildings, streets, water_bodies,
vegetation_areas, site_elevation,
num_workers, log_fn, cancel_token, precomputed,
progress_fn,
)
except Exception as e:
log_fn(f"Ray execution failed: {e} — falling back to sequential")
@@ -300,6 +335,7 @@ def calculate_coverage_parallel(
terrain_cache, buildings, streets, water_bodies,
vegetation_areas, site_elevation,
pool_workers, log_fn, cancel_token, precomputed,
progress_fn,
)
except Exception as e:
log_fn(f"ProcessPool failed: {e} — falling back to sequential")
@@ -310,6 +346,7 @@ def calculate_coverage_parallel(
grid, point_elevations, site_dict, settings_dict,
buildings, streets, water_bodies, vegetation_areas,
site_elevation, log_fn, cancel_token, precomputed,
progress_fn,
)
@@ -321,6 +358,7 @@ def _calculate_with_ray(
terrain_cache, buildings, streets, water_bodies,
vegetation_areas, site_elevation,
num_workers, log_fn, cancel_token=None, precomputed=None,
progress_fn=None,
):
"""Execute using Ray shared-memory object store."""
total_points = len(grid)
@@ -404,6 +442,9 @@ def _calculate_with_ray(
eta = (total_points - pts) / rate if rate > 0 else 0
log_fn(f"Progress: {completed_chunks}/{total_chunks} chunks ({pct}%) — "
f"{pts} pts, {rate:.0f} pts/s, ETA {eta:.0f}s")
if progress_fn:
# Map chunk progress to 40%-95% range
progress_fn("Calculating coverage", 0.40 + 0.55 * (completed_chunks / total_chunks))
calc_time = time.time() - t_calc
log_fn(f"Ray done: {calc_time:.1f}s, {len(all_results)} results "
@@ -428,9 +469,10 @@ def _pool_worker_process_chunk(args):
from app.services.terrain_service import terrain_service
terrain_service._tile_cache = terrain_cache
from app.services.spatial_index import SpatialIndex
spatial_idx = SpatialIndex()
spatial_idx = None
if buildings:
from app.services.spatial_index import SpatialIndex
spatial_idx = SpatialIndex()
spatial_idx.build(buildings)
from app.services.coverage_service import CoverageService, SiteParams, CoverageSettings
@@ -465,32 +507,162 @@ def _pool_worker_process_chunk(args):
return results
def _store_terrain_in_shm(terrain_cache: Dict[str, np.ndarray], log_fn) -> Tuple[list, Dict[str, dict]]:
"""Store terrain tile arrays in shared memory. Returns (shm_blocks, tile_refs).
tile_refs is a dict mapping tile_name -> {shm_name, shape, dtype_str}
that workers use to reconstruct numpy arrays from shared memory.
"""
import multiprocessing.shared_memory as shm_mod
blocks = []
refs = {}
for tile_name, arr in terrain_cache.items():
try:
block = shm_mod.SharedMemory(create=True, size=arr.nbytes)
blocks.append(block)
# Copy tile data to shared memory
shm_arr = np.ndarray(arr.shape, dtype=arr.dtype, buffer=block.buf)
shm_arr[:] = arr[:]
refs[tile_name] = {
'shm_name': block.name,
'shape': arr.shape,
'dtype': str(arr.dtype),
}
except Exception as e:
log_fn(f"Failed to store tile {tile_name} in shm: {e}")
# Fallback: worker will have to use pickled copy
pass
return blocks, refs
def _pool_worker_shm_chunk(args):
    """Worker function that reads terrain from shared memory instead of pickle.

    args is a 5-tuple: (chunk, terrain_shm_refs, buildings, osm_data, config)
      - chunk: list of (lat, lon, point_elev) points to process
      - terrain_shm_refs: tile_name -> {shm_name, shape, dtype} produced by
        _store_terrain_in_shm
      - buildings / osm_data: environment inputs for the point calculation
      - config: site_dict, settings_dict, site_elevation, cache_key, and an
        optional 'precomputed' map keyed by (lat, lon)

    Returns a list of model_dump()ed coverage points whose rsrp is at or
    above settings.min_signal.
    """
    import multiprocessing.shared_memory as shm_mod
    chunk, terrain_shm_refs, buildings, osm_data, config = args
    # Reconstruct terrain cache from shared memory (zero-copy numpy views)
    # A tile whose segment cannot be attached is silently skipped, so the
    # worker's cache may be a subset of the parent's tiles.
    # NOTE(review): the ndarray keeps block.buf alive, but the SharedMemory
    # handles themselves are not retained here — presumably cleanup is owned
    # by the parent process; confirm no premature close on this platform.
    terrain_cache = {}
    for tile_name, ref in terrain_shm_refs.items():
        try:
            block = shm_mod.SharedMemory(name=ref['shm_name'])
            terrain_cache[tile_name] = np.ndarray(
                ref['shape'], dtype=ref['dtype'], buffer=block.buf,
            )
        except Exception:
            pass
    # Inject terrain cache
    from app.services.terrain_service import terrain_service
    terrain_service._tile_cache = terrain_cache
    # Build spatial index
    # Cached per worker process via module globals: rebuilding is skipped
    # when consecutive chunks share the same cache_key.
    global _worker_spatial_idx, _worker_cache_key
    cache_key = config.get('cache_key', '')
    if _worker_cache_key != cache_key:
        if buildings:
            from app.services.spatial_index import SpatialIndex
            _worker_spatial_idx = SpatialIndex()
            _worker_spatial_idx.build(buildings)
        else:
            # No buildings: downstream point calc receives spatial_idx=None.
            _worker_spatial_idx = None
        _worker_cache_key = cache_key
    # Process points
    from app.services.coverage_service import CoverageService, SiteParams, CoverageSettings
    site = SiteParams(**config['site_dict'])
    settings = CoverageSettings(**config['settings_dict'])
    svc = CoverageService()
    # Per-stage timing accumulator, mutated in place by _calculate_point_sync.
    timing = {
        "los": 0.0, "buildings": 0.0, "antenna": 0.0,
        "dominant_path": 0.0, "street_canyon": 0.0,
        "reflection": 0.0, "vegetation": 0.0,
    }
    precomputed = config.get('precomputed')
    results = []
    for lat, lon, point_elev in chunk:
        # Reuse precomputed distance/path-loss for this point when available.
        pre = precomputed.get((lat, lon)) if precomputed else None
        point = svc._calculate_point_sync(
            site, lat, lon, settings,
            buildings, osm_data.get('streets', []),
            _worker_spatial_idx, osm_data.get('water_bodies', []),
            osm_data.get('vegetation_areas', []),
            config['site_elevation'], point_elev, timing,
            precomputed_distance=pre.get('distance') if pre else None,
            precomputed_path_loss=pre.get('path_loss') if pre else None,
        )
        # Filter out points below the configured signal floor.
        if point.rsrp >= settings.min_signal:
            results.append(point.model_dump())
    return results
def _calculate_with_process_pool(
grid, point_elevations, site_dict, settings_dict,
terrain_cache, buildings, streets, water_bodies,
vegetation_areas, site_elevation,
num_workers, log_fn, cancel_token=None, precomputed=None,
progress_fn=None,
):
"""Execute using ProcessPoolExecutor with reduced workers to limit memory."""
"""Execute using ProcessPoolExecutor.
Uses shared memory for terrain tiles (zero-copy numpy views) to reduce
memory usage compared to pickling full terrain arrays per worker.
"""
from concurrent.futures import ProcessPoolExecutor, as_completed
total_points = len(grid)
log_fn(f"ProcessPool mode: {total_points} points, {num_workers} workers")
# Estimate pickle size for building data and cap workers accordingly
building_count = len(buildings)
if building_count > 10000:
num_workers = min(num_workers, 3)
log_fn(f"Large building set ({building_count}) — reducing workers to {num_workers}")
elif building_count > 5000:
num_workers = min(num_workers, 4)
log_fn(f"ProcessPool mode: {total_points} points, {num_workers} workers, "
f"{building_count} buildings")
# Store terrain tiles in shared memory
shm_blocks = []
terrain_shm_refs = {}
try:
shm_blocks, terrain_shm_refs = _store_terrain_in_shm(terrain_cache, log_fn)
if terrain_shm_refs:
tile_mb = sum(
np.prod(r['shape']) * np.dtype(r['dtype']).itemsize
for r in terrain_shm_refs.values()
) / (1024 * 1024)
log_fn(f"Stored {len(terrain_shm_refs)} terrain tiles in shared memory ({tile_mb:.0f} MB)")
use_shm = True
else:
use_shm = False
except Exception as e:
log_fn(f"Shared memory setup failed ({e}), using pickle fallback")
use_shm = False
items = [
(lat, lon, point_elevations.get((lat, lon), 0.0))
for lat, lon in grid
]
# Larger chunks than Ray — fewer workers means bigger chunks
chunk_size = max(1, len(items) // (num_workers * 2))
chunks = [items[i:i + chunk_size] for i in range(0, len(items), chunk_size)]
log_fn(f"Submitting {len(chunks)} chunks of ~{chunk_size} points")
cache_key = f"{site_dict['lat']:.4f},{site_dict['lon']:.4f},{len(buildings)}"
config = {
'site_dict': site_dict,
'settings_dict': settings_dict,
'site_elevation': site_elevation,
'cache_key': cache_key,
}
if precomputed:
config['precomputed'] = precomputed
@@ -505,20 +677,32 @@ def _calculate_with_process_pool(
pool = None
try:
# Use spawn context for clean worker processes
ctx = mp.get_context('spawn')
pool = ProcessPoolExecutor(max_workers=num_workers, mp_context=ctx)
futures = {
pool.submit(
_pool_worker_process_chunk,
(chunk, terrain_cache, buildings, osm_data, config),
): i
for i, chunk in enumerate(chunks)
}
_set_active_pool(pool)
if use_shm:
# Shared memory path: pass shm refs instead of terrain data
worker_fn = _pool_worker_shm_chunk
futures = {
pool.submit(
worker_fn,
(chunk, terrain_shm_refs, buildings, osm_data, config),
): i
for i, chunk in enumerate(chunks)
}
else:
# Pickle fallback path
futures = {
pool.submit(
_pool_worker_process_chunk,
(chunk, terrain_cache, buildings, osm_data, config),
): i
for i, chunk in enumerate(chunks)
}
completed_chunks = 0
for future in as_completed(futures):
# Check cancellation between chunks
if cancel_token and cancel_token.is_cancelled:
log_fn(f"Cancelled — cancelling {len(futures) - completed_chunks - 1} pending futures")
for f in futures:
@@ -539,20 +723,27 @@ def _calculate_with_process_pool(
eta = (total_points - pts) / rate if rate > 0 else 0
log_fn(f"Progress: {completed_chunks}/{len(chunks)} chunks ({pct}%) — "
f"{pts} pts, {rate:.0f} pts/s, ETA {eta:.0f}s")
if progress_fn:
progress_fn("Calculating coverage", 0.40 + 0.55 * (completed_chunks / len(chunks)))
except Exception as e:
log_fn(f"ProcessPool error: {e}")
finally:
# CRITICAL: Always cleanup pool and orphaned workers
_clear_active_pool()
if pool:
pool.shutdown(wait=False, cancel_futures=True)
# Give pool time to cleanup gracefully
time.sleep(0.5)
# Then force kill any survivors by process name
killed = _kill_worker_processes()
if killed > 0:
log_fn(f"Force killed {killed} orphaned workers")
# Cleanup shared memory blocks
for block in shm_blocks:
try:
block.close()
block.unlink()
except Exception:
pass
calc_time = time.time() - t_calc
log_fn(f"ProcessPool done: {calc_time:.1f}s, {len(all_results)} results "
@@ -561,7 +752,7 @@ def _calculate_with_process_pool(
timing = {
"parallel_total": calc_time,
"workers": num_workers,
"backend": "process_pool",
"backend": "process_pool" + ("/shm" if use_shm else "/pickle"),
}
return all_results, timing
@@ -573,6 +764,7 @@ def _calculate_sequential(
grid, point_elevations, site_dict, settings_dict,
buildings, streets, water_bodies, vegetation_areas,
site_elevation, log_fn, cancel_token=None, precomputed=None,
progress_fn=None,
):
"""Sequential fallback — no extra dependencies, runs in calling thread."""
from app.services.coverage_service import CoverageService, SiteParams, CoverageSettings
@@ -582,8 +774,9 @@ def _calculate_sequential(
settings = CoverageSettings(**settings_dict)
svc = CoverageService()
spatial_idx = SpatialIndex()
spatial_idx = None
if buildings:
spatial_idx = SpatialIndex()
spatial_idx.build(buildings)
total = len(grid)
@@ -604,6 +797,8 @@ def _calculate_sequential(
if i % log_interval == 0:
log_fn(f"Sequential: {i}/{total} ({i * 100 // total}%)")
if progress_fn:
progress_fn("Calculating coverage", 0.40 + 0.55 * (i / total))
point_elev = point_elevations.get((lat, lon), 0.0)

View File

@@ -1,3 +1,4 @@
import asyncio
import numpy as np
from typing import List, Tuple, Optional
from dataclasses import dataclass
@@ -24,7 +25,10 @@ class StreetCanyonService:
Loss increases at corners/turns.
"""
OVERPASS_URL = "https://overpass-api.de/api/interpreter"
OVERPASS_URLS = [
"https://overpass-api.de/api/interpreter",
"https://overpass.kumi.systems/api/interpreter",
]
# Default street widths by type
STREET_WIDTHS = {
@@ -88,14 +92,24 @@ class StreetCanyonService:
out skel qt;
"""
try:
async with httpx.AsyncClient(timeout=60.0) as client:
response = await client.post(self.OVERPASS_URL, data={"data": query})
response.raise_for_status()
data = response.json()
except Exception as e:
print(f"[Streets] Fetch error: {e}")
return []
data = None
max_retries = 3
for attempt in range(max_retries):
url = self.OVERPASS_URLS[attempt % len(self.OVERPASS_URLS)]
try:
timeout = 60.0 * (attempt + 1)
async with httpx.AsyncClient(timeout=timeout) as client:
response = await client.post(url, data={"data": query})
response.raise_for_status()
data = response.json()
break
except Exception as e:
print(f"[Streets] Overpass attempt {attempt + 1}/{max_retries} failed ({url}): {e}")
if attempt < max_retries - 1:
await asyncio.sleep(2 ** attempt)
else:
print(f"[Streets] All {max_retries} attempts failed")
return []
streets = self._parse_streets(data)

View File

@@ -6,6 +6,7 @@ Uses ITU-R P.833 approximations for foliage loss.
"""
import os
import asyncio
import httpx
import json
from typing import List, Tuple, Optional
@@ -81,7 +82,10 @@ class VegetationCache:
class VegetationService:
"""OSM vegetation for signal attenuation"""
OVERPASS_URL = "https://overpass-api.de/api/interpreter"
OVERPASS_URLS = [
"https://overpass-api.de/api/interpreter",
"https://overpass.kumi.systems/api/interpreter",
]
# Attenuation dB per 100 meters of vegetation
ATTENUATION_DB_PER_100M = {
@@ -127,7 +131,7 @@ class VegetationService:
self._memory_cache[cache_key] = areas
return areas
# Fetch from Overpass
# Fetch from Overpass with retry
print(f"[Vegetation] Fetching from Overpass API...")
query = f"""
@@ -143,14 +147,26 @@ class VegetationService:
out skel qt;
"""
try:
async with httpx.AsyncClient(timeout=60.0) as client:
response = await client.post(self.OVERPASS_URL, data={"data": query})
response.raise_for_status()
data = response.json()
except Exception as e:
print(f"[Vegetation] Fetch error: {e}")
return []
data = None
max_retries = 3
for attempt in range(max_retries):
url = self.OVERPASS_URLS[attempt % len(self.OVERPASS_URLS)]
try:
timeout = 60.0 * (attempt + 1) # 60s, 120s, 180s
async with httpx.AsyncClient(timeout=timeout) as client:
response = await client.post(url, data={"data": query})
response.raise_for_status()
data = response.json()
break
except Exception as e:
print(f"[Vegetation] Overpass attempt {attempt + 1}/{max_retries} failed ({url}): {e}")
if attempt < max_retries - 1:
wait_time = 2 ** attempt # 1s, 2s
print(f"[Vegetation] Retrying in {wait_time}s...")
await asyncio.sleep(wait_time)
else:
print(f"[Vegetation] All {max_retries} attempts failed")
return []
areas = self._parse_response(data)

View File

@@ -6,6 +6,7 @@ or create multipath interference for RF signals.
"""
import os
import asyncio
import httpx
import json
from typing import List, Tuple, Optional
@@ -81,7 +82,10 @@ class WaterCache:
class WaterService:
"""OSM water bodies for reflection calculations"""
OVERPASS_URL = "https://overpass-api.de/api/interpreter"
OVERPASS_URLS = [
"https://overpass-api.de/api/interpreter",
"https://overpass.kumi.systems/api/interpreter",
]
# Reflection coefficients by water type
REFLECTION_COEFF = {
@@ -132,14 +136,24 @@ class WaterService:
out skel qt;
"""
try:
async with httpx.AsyncClient(timeout=60.0) as client:
response = await client.post(self.OVERPASS_URL, data={"data": query})
response.raise_for_status()
data = response.json()
except Exception as e:
print(f"[Water] Fetch error: {e}")
return []
data = None
max_retries = 3
for attempt in range(max_retries):
url = self.OVERPASS_URLS[attempt % len(self.OVERPASS_URLS)]
try:
timeout = 60.0 * (attempt + 1)
async with httpx.AsyncClient(timeout=timeout) as client:
response = await client.post(url, data={"data": query})
response.raise_for_status()
data = response.json()
break
except Exception as e:
print(f"[Water] Overpass attempt {attempt + 1}/{max_retries} failed ({url}): {e}")
if attempt < max_retries - 1:
await asyncio.sleep(2 ** attempt)
else:
print(f"[Water] All {max_retries} attempts failed")
return []
bodies = self._parse_response(data)