Compare commits
21 Commits
acc90fe538
...
v1.0-rc-py
| Author | SHA1 | Date | |
|---|---|---|---|
| 833dead43c | |||
| 1d8375af02 | |||
| acfd9b8f7b | |||
| 81e078e92a | |||
| e392b449cc | |||
| 6dcc5a19b9 | |||
| 6cd9d869cc | |||
| a61753c642 | |||
| 20d19d09ae | |||
| 255b91f257 | |||
| 3b36535d4e | |||
| f46bf16428 | |||
| 57106df5ae | |||
| 867ee3d0f4 | |||
| 7f0b4d2269 | |||
| f5429e40fd | |||
| c8c2608266 | |||
| aa07fb5f02 | |||
| b5b2fd90d2 | |||
| defa3ad440 | |||
| 1dde56705a |
@@ -23,7 +23,33 @@
|
||||
"Bash(python3:*)",
|
||||
"Bash(source:*)",
|
||||
"Bash(/mnt/d/root/rfcp/venv/bin/python3:*)",
|
||||
"Bash(node --check:*)"
|
||||
"Bash(node --check:*)",
|
||||
"Bash(/mnt/d/root/rfcp/venv/bin/python -m pytest:*)",
|
||||
"Bash(/mnt/d/root/rfcp/venv/bin/python:*)",
|
||||
"Bash(/mnt/d/root/rfcp/venv/bin/pip list:*)",
|
||||
"Bash(pip3 install numpy)",
|
||||
"Bash(echo:*)",
|
||||
"Bash(find:*)",
|
||||
"Bash(node -c:*)",
|
||||
"Bash(curl:*)",
|
||||
"Bash(head -3 python3 -c \"import numpy; print\\(numpy.__file__\\)\")",
|
||||
"Bash(pip3 install:*)",
|
||||
"Bash(apt list:*)",
|
||||
"Bash(dpkg:*)",
|
||||
"Bash(sudo apt-get install:*)",
|
||||
"Bash(docker:*)",
|
||||
"Bash(~/.local/bin/pip install:*)",
|
||||
"Bash(pgrep:*)",
|
||||
"Bash(kill:*)",
|
||||
"Bash(sort:*)",
|
||||
"Bash(journalctl:*)",
|
||||
"Bash(pkill:*)",
|
||||
"Bash(pip3 list:*)",
|
||||
"Bash(chmod:*)",
|
||||
"Bash(pyinstaller:*)",
|
||||
"Bash(npm i:*)",
|
||||
"Bash(npm uninstall:*)",
|
||||
"Bash(npm rebuild:*)"
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
4
.gitignore
vendored
4
.gitignore
vendored
@@ -24,3 +24,7 @@ installer/dist/
|
||||
__pycache__/
|
||||
*.pyc
|
||||
nul
|
||||
|
||||
# PyInstaller build artifacts
|
||||
backend/build/
|
||||
backend/dist/
|
||||
|
||||
1513
RFCP-RUST-MIGRATION-PLAN.md
Normal file
1513
RFCP-RUST-MIGRATION-PLAN.md
Normal file
File diff suppressed because it is too large
Load Diff
23
RFCP.bat
Normal file
23
RFCP.bat
Normal file
@@ -0,0 +1,23 @@
|
||||
@echo off
|
||||
title RFCP - RF Coverage Planner
|
||||
cd /d "%~dp0"
|
||||
|
||||
REM Check if backend exists
|
||||
if not exist "backend\app\main.py" (
|
||||
echo ERROR: RFCP backend not found.
|
||||
echo Run install.bat first or check your installation.
|
||||
pause
|
||||
exit /b 1
|
||||
)
|
||||
|
||||
echo ============================================
|
||||
echo RFCP - RF Coverage Planner
|
||||
echo ============================================
|
||||
echo.
|
||||
echo Starting backend server...
|
||||
echo Open http://localhost:8090 in your browser
|
||||
echo Press Ctrl+C to stop
|
||||
echo.
|
||||
|
||||
cd backend
|
||||
python -m uvicorn app.main:app --host 0.0.0.0 --port 8090
|
||||
@@ -11,8 +11,10 @@ from app.services.coverage_service import (
|
||||
CoveragePoint,
|
||||
apply_preset,
|
||||
PRESETS,
|
||||
select_propagation_model,
|
||||
)
|
||||
from app.services.parallel_coverage_service import CancellationToken
|
||||
from app.services.boundary_service import calculate_coverage_boundary
|
||||
|
||||
router = APIRouter()
|
||||
|
||||
@@ -23,6 +25,12 @@ class CoverageRequest(BaseModel):
|
||||
settings: CoverageSettings = CoverageSettings()
|
||||
|
||||
|
||||
class BoundaryPoint(BaseModel):
|
||||
"""Single boundary coordinate"""
|
||||
lat: float
|
||||
lon: float
|
||||
|
||||
|
||||
class CoverageResponse(BaseModel):
|
||||
"""Coverage calculation response"""
|
||||
points: List[CoveragePoint]
|
||||
@@ -31,6 +39,7 @@ class CoverageResponse(BaseModel):
|
||||
stats: dict
|
||||
computation_time: float # seconds
|
||||
models_used: List[str] # which models were active
|
||||
boundary: Optional[List[BoundaryPoint]] = None # coverage boundary polygon
|
||||
|
||||
|
||||
@router.post("/calculate")
|
||||
@@ -58,12 +67,26 @@ async def calculate_coverage(request: CoverageRequest) -> CoverageResponse:
|
||||
effective_settings = apply_preset(request.settings.model_copy())
|
||||
models_used = _get_active_models(effective_settings)
|
||||
|
||||
# Add the selected propagation model for the first site's frequency
|
||||
env = getattr(effective_settings, 'environment', 'urban')
|
||||
primary_model = select_propagation_model(request.sites[0].frequency, env)
|
||||
if primary_model.name not in models_used:
|
||||
models_used.insert(0, primary_model.name)
|
||||
|
||||
# Time the calculation
|
||||
start_time = time.time()
|
||||
cancel_token = CancellationToken()
|
||||
|
||||
# Dynamic timeout based on radius (large radius needs more time for tiled processing)
|
||||
radius_m = request.settings.radius
|
||||
if radius_m > 30_000:
|
||||
calc_timeout = 600.0 # 10 min for 30-50km
|
||||
elif radius_m > 10_000:
|
||||
calc_timeout = 480.0 # 8 min for 10-30km
|
||||
else:
|
||||
calc_timeout = 300.0 # 5 min for ≤10km
|
||||
|
||||
try:
|
||||
# Calculate with 5-minute timeout
|
||||
if len(request.sites) == 1:
|
||||
points = await asyncio.wait_for(
|
||||
coverage_service.calculate_coverage(
|
||||
@@ -71,7 +94,7 @@ async def calculate_coverage(request: CoverageRequest) -> CoverageResponse:
|
||||
request.settings,
|
||||
cancel_token,
|
||||
),
|
||||
timeout=300.0
|
||||
timeout=calc_timeout,
|
||||
)
|
||||
else:
|
||||
points = await asyncio.wait_for(
|
||||
@@ -80,14 +103,15 @@ async def calculate_coverage(request: CoverageRequest) -> CoverageResponse:
|
||||
request.settings,
|
||||
cancel_token,
|
||||
),
|
||||
timeout=300.0
|
||||
timeout=calc_timeout,
|
||||
)
|
||||
except asyncio.TimeoutError:
|
||||
cancel_token.cancel()
|
||||
# Force cleanup orphaned worker processes
|
||||
from app.services.parallel_coverage_service import _kill_worker_processes
|
||||
killed = _kill_worker_processes()
|
||||
detail = f"Calculation timeout (5 min). Cleaned up {killed} workers." if killed else "Calculation timeout (5 min) — try smaller radius or lower resolution"
|
||||
timeout_min = int(calc_timeout / 60)
|
||||
detail = f"Calculation timeout ({timeout_min} min). Cleaned up {killed} workers." if killed else f"Calculation timeout ({timeout_min} min) — try smaller radius or lower resolution"
|
||||
raise HTTPException(408, detail)
|
||||
except asyncio.CancelledError:
|
||||
cancel_token.cancel()
|
||||
@@ -115,13 +139,77 @@ async def calculate_coverage(request: CoverageRequest) -> CoverageResponse:
|
||||
"points_with_atmospheric_loss": sum(1 for p in points if p.atmospheric_loss > 0),
|
||||
}
|
||||
|
||||
# Calculate coverage boundary
|
||||
boundary = None
|
||||
if points:
|
||||
boundary_coords = calculate_coverage_boundary(
|
||||
[p.model_dump() for p in points],
|
||||
threshold_dbm=request.settings.min_signal,
|
||||
)
|
||||
if boundary_coords:
|
||||
boundary = [BoundaryPoint(**c) for c in boundary_coords]
|
||||
|
||||
return CoverageResponse(
|
||||
points=points,
|
||||
count=len(points),
|
||||
settings=effective_settings,
|
||||
stats=stats,
|
||||
computation_time=round(computation_time, 2),
|
||||
models_used=models_used
|
||||
models_used=models_used,
|
||||
boundary=boundary,
|
||||
)
|
||||
|
||||
|
||||
@router.post("/preview")
|
||||
async def calculate_preview(request: CoverageRequest) -> CoverageResponse:
|
||||
"""
|
||||
Fast radial preview using terrain-only along 360 spokes.
|
||||
|
||||
Returns coverage points much faster than full calculation
|
||||
by skipping building/OSM data and using radial spokes instead of grid.
|
||||
"""
|
||||
if not request.sites:
|
||||
raise HTTPException(400, "At least one site required")
|
||||
|
||||
site = request.sites[0]
|
||||
effective_settings = apply_preset(request.settings.model_copy())
|
||||
|
||||
env = getattr(effective_settings, 'environment', 'urban')
|
||||
primary_model = select_propagation_model(site.frequency, env)
|
||||
models_used = ["terrain_los", primary_model.name]
|
||||
|
||||
start_time = time.time()
|
||||
|
||||
try:
|
||||
points = await asyncio.wait_for(
|
||||
coverage_service.calculate_radial_preview(
|
||||
site, request.settings,
|
||||
),
|
||||
timeout=30.0,
|
||||
)
|
||||
except asyncio.TimeoutError:
|
||||
raise HTTPException(408, "Preview timeout (30s)")
|
||||
|
||||
computation_time = time.time() - start_time
|
||||
|
||||
rsrp_values = [p.rsrp for p in points]
|
||||
los_count = sum(1 for p in points if p.has_los)
|
||||
|
||||
stats = {
|
||||
"min_rsrp": min(rsrp_values) if rsrp_values else 0,
|
||||
"max_rsrp": max(rsrp_values) if rsrp_values else 0,
|
||||
"avg_rsrp": sum(rsrp_values) / len(rsrp_values) if rsrp_values else 0,
|
||||
"los_percentage": (los_count / len(points) * 100) if points else 0,
|
||||
"mode": "radial_preview",
|
||||
}
|
||||
|
||||
return CoverageResponse(
|
||||
points=points,
|
||||
count=len(points),
|
||||
settings=effective_settings,
|
||||
stats=stats,
|
||||
computation_time=round(computation_time, 2),
|
||||
models_used=models_used,
|
||||
)
|
||||
|
||||
|
||||
@@ -180,9 +268,361 @@ async def get_buildings(
|
||||
}
|
||||
|
||||
|
||||
@router.post("/link-budget")
|
||||
async def calculate_link_budget(request: dict):
|
||||
"""Calculate point-to-point link budget.
|
||||
|
||||
Body: {
|
||||
"tx_lat": 48.46, "tx_lon": 35.04,
|
||||
"tx_power_dbm": 43, "tx_gain_dbi": 18, "tx_cable_loss_db": 2,
|
||||
"tx_height_m": 30,
|
||||
"rx_lat": 48.50, "rx_lon": 35.10,
|
||||
"rx_gain_dbi": 0, "rx_cable_loss_db": 0, "rx_sensitivity_dbm": -100,
|
||||
"rx_height_m": 1.5,
|
||||
"frequency_mhz": 1800
|
||||
}
|
||||
"""
|
||||
import math
|
||||
from app.services.terrain_service import terrain_service
|
||||
|
||||
# Extract parameters with defaults
|
||||
tx_lat = request.get("tx_lat", 48.46)
|
||||
tx_lon = request.get("tx_lon", 35.04)
|
||||
tx_power_dbm = request.get("tx_power_dbm", 43)
|
||||
tx_gain_dbi = request.get("tx_gain_dbi", 18)
|
||||
tx_cable_loss_db = request.get("tx_cable_loss_db", 2)
|
||||
tx_height_m = request.get("tx_height_m", 30)
|
||||
|
||||
rx_lat = request.get("rx_lat", 48.50)
|
||||
rx_lon = request.get("rx_lon", 35.10)
|
||||
rx_gain_dbi = request.get("rx_gain_dbi", 0)
|
||||
rx_cable_loss_db = request.get("rx_cable_loss_db", 0)
|
||||
rx_sensitivity_dbm = request.get("rx_sensitivity_dbm", -100)
|
||||
rx_height_m = request.get("rx_height_m", 1.5)
|
||||
|
||||
freq = request.get("frequency_mhz", 1800)
|
||||
|
||||
# Calculate distance
|
||||
distance_m = terrain_service.haversine_distance(tx_lat, tx_lon, rx_lat, rx_lon)
|
||||
distance_km = distance_m / 1000
|
||||
|
||||
# Get elevations
|
||||
tx_elev = await terrain_service.get_elevation(tx_lat, tx_lon)
|
||||
rx_elev = await terrain_service.get_elevation(rx_lat, rx_lon)
|
||||
|
||||
# EIRP
|
||||
eirp_dbm = tx_power_dbm + tx_gain_dbi - tx_cable_loss_db
|
||||
|
||||
# Free space path loss
|
||||
if distance_km > 0:
|
||||
fspl_db = 20 * math.log10(distance_km) + 20 * math.log10(freq) + 32.45
|
||||
else:
|
||||
fspl_db = 0
|
||||
|
||||
# Terrain profile for LOS check
|
||||
profile = await terrain_service.get_elevation_profile(
|
||||
tx_lat, tx_lon, rx_lat, rx_lon, num_points=100
|
||||
)
|
||||
|
||||
# LOS check - does terrain block line of sight?
|
||||
tx_total_height = tx_elev + tx_height_m
|
||||
rx_total_height = rx_elev + rx_height_m
|
||||
|
||||
terrain_loss_db = 0.0
|
||||
los_clear = True
|
||||
obstructions = []
|
||||
|
||||
for i, point in enumerate(profile):
|
||||
if i == 0 or i == len(profile) - 1:
|
||||
continue
|
||||
# Linear interpolation of LOS line at this point
|
||||
fraction = i / (len(profile) - 1)
|
||||
los_height = tx_total_height + fraction * (rx_total_height - tx_total_height)
|
||||
if point["elevation"] > los_height:
|
||||
los_clear = False
|
||||
obstruction_height = point["elevation"] - los_height
|
||||
obstructions.append({
|
||||
"distance_m": point["distance"],
|
||||
"height_above_los_m": round(obstruction_height, 1),
|
||||
})
|
||||
# Knife-edge diffraction estimate: ~6dB per major obstruction
|
||||
terrain_loss_db += min(6.0, obstruction_height * 0.3)
|
||||
|
||||
# Cap terrain loss at reasonable max
|
||||
terrain_loss_db = min(terrain_loss_db, 40.0)
|
||||
|
||||
total_path_loss = fspl_db + terrain_loss_db
|
||||
|
||||
# Received power
|
||||
rx_power_dbm = eirp_dbm - total_path_loss + rx_gain_dbi - rx_cable_loss_db
|
||||
|
||||
# Link margin
|
||||
margin_db = rx_power_dbm - rx_sensitivity_dbm
|
||||
|
||||
return {
|
||||
"distance_km": round(distance_km, 2),
|
||||
"distance_m": round(distance_m, 1),
|
||||
"tx_elevation_m": round(tx_elev, 1),
|
||||
"rx_elevation_m": round(rx_elev, 1),
|
||||
"eirp_dbm": round(eirp_dbm, 1),
|
||||
"fspl_db": round(fspl_db, 1),
|
||||
"terrain_loss_db": round(terrain_loss_db, 1),
|
||||
"total_path_loss_db": round(total_path_loss, 1),
|
||||
"los_clear": los_clear,
|
||||
"obstructions": obstructions,
|
||||
"rx_power_dbm": round(rx_power_dbm, 1),
|
||||
"margin_db": round(margin_db, 1),
|
||||
"status": "OK" if margin_db >= 0 else "FAIL",
|
||||
"link_budget": {
|
||||
"tx_power_dbm": tx_power_dbm,
|
||||
"tx_gain_dbi": tx_gain_dbi,
|
||||
"tx_cable_loss_db": tx_cable_loss_db,
|
||||
"rx_gain_dbi": rx_gain_dbi,
|
||||
"rx_cable_loss_db": rx_cable_loss_db,
|
||||
"rx_sensitivity_dbm": rx_sensitivity_dbm,
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
@router.post("/fresnel-profile")
|
||||
async def fresnel_profile(request: dict):
|
||||
"""Calculate terrain profile with Fresnel zone boundaries.
|
||||
|
||||
Body: {
|
||||
"tx_lat": 48.46, "tx_lon": 35.04, "tx_height_m": 30,
|
||||
"rx_lat": 48.50, "rx_lon": 35.10, "rx_height_m": 1.5,
|
||||
"frequency_mhz": 1800,
|
||||
"num_points": 100
|
||||
}
|
||||
"""
|
||||
import math
|
||||
from app.services.terrain_service import terrain_service
|
||||
|
||||
tx_lat = request.get("tx_lat", 48.46)
|
||||
tx_lon = request.get("tx_lon", 35.04)
|
||||
rx_lat = request.get("rx_lat", 48.50)
|
||||
rx_lon = request.get("rx_lon", 35.10)
|
||||
tx_height = request.get("tx_height_m", 30)
|
||||
rx_height = request.get("rx_height_m", 1.5)
|
||||
freq = request.get("frequency_mhz", 1800)
|
||||
num_points = request.get("num_points", 100)
|
||||
|
||||
# Get terrain profile
|
||||
profile = await terrain_service.get_elevation_profile(
|
||||
tx_lat, tx_lon, rx_lat, rx_lon, num_points
|
||||
)
|
||||
|
||||
if not profile:
|
||||
return {"error": "Could not generate terrain profile"}
|
||||
|
||||
total_distance = profile[-1]["distance"] if profile else 0
|
||||
|
||||
# Get endpoint elevations
|
||||
tx_elev = profile[0]["elevation"]
|
||||
rx_elev = profile[-1]["elevation"]
|
||||
tx_total = tx_elev + tx_height
|
||||
rx_total = rx_elev + rx_height
|
||||
|
||||
wavelength = 300.0 / freq # meters
|
||||
|
||||
# Calculate Fresnel zone at each profile point
|
||||
fresnel_data = []
|
||||
los_blocked = False
|
||||
fresnel_blocked = False
|
||||
worst_clearance = float('inf')
|
||||
fresnel_intrusion_count = 0
|
||||
|
||||
for i, point in enumerate(profile):
|
||||
d1 = point["distance"] # distance from tx
|
||||
d2 = total_distance - d1 # distance to rx
|
||||
|
||||
# LOS height at this point (linear interpolation)
|
||||
if total_distance > 0:
|
||||
fraction = d1 / total_distance
|
||||
else:
|
||||
fraction = 0
|
||||
los_height = tx_total + fraction * (rx_total - tx_total)
|
||||
|
||||
# First Fresnel zone radius
|
||||
if d1 > 0 and d2 > 0 and total_distance > 0:
|
||||
f1_radius = math.sqrt((1 * wavelength * d1 * d2) / total_distance)
|
||||
else:
|
||||
f1_radius = 0
|
||||
|
||||
# Fresnel zone boundaries (height above sea level)
|
||||
fresnel_top = los_height + f1_radius
|
||||
fresnel_bottom = los_height - f1_radius
|
||||
|
||||
# Clearance: how much space between terrain and Fresnel bottom
|
||||
clearance = fresnel_bottom - point["elevation"]
|
||||
|
||||
if clearance < worst_clearance:
|
||||
worst_clearance = clearance
|
||||
|
||||
if point["elevation"] > los_height:
|
||||
los_blocked = True
|
||||
if point["elevation"] > fresnel_bottom:
|
||||
fresnel_blocked = True
|
||||
fresnel_intrusion_count += 1
|
||||
|
||||
fresnel_data.append({
|
||||
"distance": round(point["distance"], 1),
|
||||
"lat": point["lat"],
|
||||
"lon": point["lon"],
|
||||
"terrain_elevation": round(point["elevation"], 1),
|
||||
"los_height": round(los_height, 1),
|
||||
"fresnel_top": round(fresnel_top, 1),
|
||||
"fresnel_bottom": round(fresnel_bottom, 1),
|
||||
"f1_radius": round(f1_radius, 1),
|
||||
"clearance": round(clearance, 1),
|
||||
})
|
||||
|
||||
# Calculate Fresnel clearance percentage
|
||||
fresnel_clear_pct = round(100 * (1 - fresnel_intrusion_count / len(profile)), 1) if profile else 100
|
||||
|
||||
# Estimate additional loss due to Fresnel obstruction
|
||||
if los_blocked:
|
||||
estimated_loss_db = 10 + abs(worst_clearance) * 0.5 # rough estimate
|
||||
elif fresnel_blocked:
|
||||
estimated_loss_db = 3 + (100 - fresnel_clear_pct) * 0.06 # 3-6 dB typical
|
||||
else:
|
||||
estimated_loss_db = 0
|
||||
|
||||
return {
|
||||
"profile": fresnel_data,
|
||||
"total_distance_m": round(total_distance, 1),
|
||||
"tx_elevation": round(tx_elev, 1),
|
||||
"rx_elevation": round(rx_elev, 1),
|
||||
"frequency_mhz": freq,
|
||||
"wavelength_m": round(wavelength, 4),
|
||||
"los_clear": not los_blocked,
|
||||
"fresnel_clear": not fresnel_blocked,
|
||||
"fresnel_clear_pct": fresnel_clear_pct,
|
||||
"worst_clearance_m": round(worst_clearance, 1),
|
||||
"estimated_loss_db": round(estimated_loss_db, 1),
|
||||
"recommendation": (
|
||||
"Clear — excellent link" if not fresnel_blocked
|
||||
else "Fresnel zone partially blocked — expect 3-6 dB additional loss"
|
||||
if not los_blocked
|
||||
else "LOS blocked — significant diffraction loss expected"
|
||||
),
|
||||
}
|
||||
|
||||
|
||||
@router.post("/interference")
|
||||
async def calculate_interference(request: CoverageRequest):
|
||||
"""Calculate C/I (carrier-to-interference) ratio for multi-site scenario.
|
||||
|
||||
Uses the same request format as /calculate but returns interference analysis
|
||||
instead of raw coverage. Requires 2+ sites to be meaningful.
|
||||
|
||||
Returns for each grid point:
|
||||
- C/I ratio (carrier to interference) in dB
|
||||
- Best server index
|
||||
- Best server RSRP
|
||||
"""
|
||||
import numpy as np
|
||||
from app.services.gpu_service import gpu_service
|
||||
|
||||
if len(request.sites) < 2:
|
||||
raise HTTPException(400, "At least 2 sites required for interference analysis")
|
||||
|
||||
if len(request.sites) > 10:
|
||||
raise HTTPException(400, "Maximum 10 sites per request")
|
||||
|
||||
# First calculate coverage for all sites
|
||||
start_time = time.time()
|
||||
cancel_token = CancellationToken()
|
||||
|
||||
try:
|
||||
# Calculate coverage for each site individually
|
||||
site_results = []
|
||||
for site in request.sites:
|
||||
points = await asyncio.wait_for(
|
||||
coverage_service.calculate_coverage(
|
||||
site,
|
||||
request.settings,
|
||||
cancel_token,
|
||||
),
|
||||
timeout=120.0, # 2 min per site
|
||||
)
|
||||
site_results.append(points)
|
||||
|
||||
except asyncio.TimeoutError:
|
||||
cancel_token.cancel()
|
||||
raise HTTPException(408, "Calculation timeout")
|
||||
|
||||
computation_time = time.time() - start_time
|
||||
|
||||
# Build coordinate -> RSRP mapping for each site
|
||||
# We need to align the grids (same points for all sites)
|
||||
coord_set = set()
|
||||
for points in site_results:
|
||||
for p in points:
|
||||
coord_set.add((round(p.lat, 6), round(p.lon, 6)))
|
||||
|
||||
coord_list = sorted(coord_set)
|
||||
|
||||
# Build RSRP arrays aligned to coord_list
|
||||
rsrp_grids = []
|
||||
frequencies = []
|
||||
for idx, (site, points) in enumerate(zip(request.sites, site_results)):
|
||||
# Map coordinates to RSRP
|
||||
point_map = {(round(p.lat, 6), round(p.lon, 6)): p.rsrp for p in points}
|
||||
rsrp_array = np.array([
|
||||
point_map.get(coord, -150) # -150 dBm = no coverage
|
||||
for coord in coord_list
|
||||
], dtype=np.float64)
|
||||
rsrp_grids.append(rsrp_array)
|
||||
frequencies.append(site.frequency)
|
||||
|
||||
# Calculate C/I using GPU service
|
||||
ci_ratio, best_server_idx, best_rsrp = gpu_service.calculate_interference_vectorized(
|
||||
rsrp_grids, frequencies
|
||||
)
|
||||
|
||||
# Build result points with C/I data
|
||||
ci_points = []
|
||||
for i, (lat, lon) in enumerate(coord_list):
|
||||
ci_points.append({
|
||||
"lat": lat,
|
||||
"lon": lon,
|
||||
"ci_ratio_db": round(float(ci_ratio[i]), 1),
|
||||
"best_server_idx": int(best_server_idx[i]),
|
||||
"best_server_rsrp": round(float(best_rsrp[i]), 1),
|
||||
})
|
||||
|
||||
# Calculate statistics
|
||||
ci_values = [p["ci_ratio_db"] for p in ci_points]
|
||||
stats = {
|
||||
"min_ci_db": round(min(ci_values), 1) if ci_values else 0,
|
||||
"max_ci_db": round(max(ci_values), 1) if ci_values else 0,
|
||||
"avg_ci_db": round(sum(ci_values) / len(ci_values), 1) if ci_values else 0,
|
||||
"good_coverage_pct": round(100 * sum(1 for c in ci_values if c >= 10) / len(ci_values), 1) if ci_values else 0,
|
||||
"marginal_coverage_pct": round(100 * sum(1 for c in ci_values if 0 <= c < 10) / len(ci_values), 1) if ci_values else 0,
|
||||
"interference_dominant_pct": round(100 * sum(1 for c in ci_values if c < 0) / len(ci_values), 1) if ci_values else 0,
|
||||
}
|
||||
|
||||
# Check for frequency groups
|
||||
unique_freqs = set(frequencies)
|
||||
freq_groups = {}
|
||||
for freq in unique_freqs:
|
||||
freq_groups[freq] = sum(1 for f in frequencies if f == freq)
|
||||
|
||||
return {
|
||||
"points": ci_points,
|
||||
"count": len(ci_points),
|
||||
"stats": stats,
|
||||
"computation_time": round(computation_time, 2),
|
||||
"sites": [{"name": s.name, "frequency_mhz": s.frequency} for s in request.sites],
|
||||
"frequency_groups": freq_groups,
|
||||
"warning": None if any(c > 1 for c in freq_groups.values()) else "All sites on different frequencies - no co-channel interference",
|
||||
}
|
||||
|
||||
|
||||
def _get_active_models(settings: CoverageSettings) -> List[str]:
|
||||
"""Determine which propagation models are active"""
|
||||
models = ["okumura_hata"] # Always active as base model
|
||||
models = [] # Base propagation model added by caller via select_propagation_model()
|
||||
|
||||
if settings.use_terrain:
|
||||
models.append("terrain_los")
|
||||
|
||||
41
backend/app/api/routes/gpu.py
Normal file
41
backend/app/api/routes/gpu.py
Normal file
@@ -0,0 +1,41 @@
|
||||
"""GPU management API endpoints."""
|
||||
|
||||
from fastapi import APIRouter, HTTPException
|
||||
from pydantic import BaseModel
|
||||
|
||||
from app.services.gpu_backend import gpu_manager
|
||||
|
||||
router = APIRouter()
|
||||
|
||||
|
||||
class SetDeviceRequest(BaseModel):
|
||||
backend: str
|
||||
index: int = 0
|
||||
|
||||
|
||||
@router.get("/status")
|
||||
async def gpu_status():
|
||||
"""Return GPU manager status: active backend, device, available devices."""
|
||||
return gpu_manager.get_status()
|
||||
|
||||
|
||||
@router.get("/devices")
|
||||
async def gpu_devices():
|
||||
"""Return list of available compute devices."""
|
||||
return {"devices": gpu_manager.get_devices()}
|
||||
|
||||
|
||||
@router.post("/set")
|
||||
async def gpu_set_device(request: SetDeviceRequest):
|
||||
"""Switch active compute device."""
|
||||
try:
|
||||
result = gpu_manager.set_device(request.backend, request.index)
|
||||
return {"status": "ok", **result}
|
||||
except ValueError as e:
|
||||
raise HTTPException(status_code=400, detail=str(e))
|
||||
|
||||
|
||||
@router.get("/diagnostics")
|
||||
async def gpu_diagnostics():
|
||||
"""Full GPU diagnostic info for troubleshooting detection issues."""
|
||||
return gpu_manager.get_diagnostics()
|
||||
@@ -1,12 +1,29 @@
|
||||
import sys
|
||||
import platform
|
||||
|
||||
from fastapi import APIRouter, Depends
|
||||
from app.api.deps import get_db
|
||||
from app.services.gpu_backend import gpu_manager
|
||||
|
||||
router = APIRouter()
|
||||
|
||||
|
||||
@router.get("/")
|
||||
async def health_check():
|
||||
return {"status": "ok", "service": "rfcp-backend", "version": "1.1.0"}
|
||||
gpu_info = gpu_manager.get_status()
|
||||
return {
|
||||
"status": "ok",
|
||||
"service": "rfcp-backend",
|
||||
"version": "3.6.0",
|
||||
"build": "gpu" if gpu_info.get("gpu_available") else "cpu",
|
||||
"gpu": {
|
||||
"available": gpu_info.get("gpu_available", False),
|
||||
"backend": gpu_info.get("active_backend", "cpu"),
|
||||
"device": gpu_info.get("active_device", {}).get("name") if gpu_info.get("active_device") else "CPU",
|
||||
},
|
||||
"python": sys.version.split()[0],
|
||||
"platform": platform.system(),
|
||||
}
|
||||
|
||||
|
||||
@router.get("/db")
|
||||
|
||||
@@ -1,10 +1,16 @@
|
||||
import os
|
||||
import json
|
||||
import asyncio
|
||||
import multiprocessing as mp
|
||||
from pathlib import Path
|
||||
from fastapi import APIRouter
|
||||
|
||||
router = APIRouter()
|
||||
|
||||
# Valid SRTM tile sizes (bytes)
|
||||
_SRTM1_SIZE = 3601 * 3601 * 2 # 25,934,402
|
||||
_SRTM3_SIZE = 1201 * 1201 * 2 # 2,884,802
|
||||
|
||||
|
||||
@router.get("/info")
|
||||
async def get_system_info():
|
||||
@@ -46,15 +52,134 @@ async def get_system_info():
|
||||
}
|
||||
|
||||
|
||||
@router.get("/models")
|
||||
async def get_propagation_models():
|
||||
"""Return available propagation models and their valid ranges."""
|
||||
from app.core.engine import engine
|
||||
return {
|
||||
"models": engine.get_available_models(),
|
||||
}
|
||||
|
||||
|
||||
@router.post("/shutdown")
|
||||
async def shutdown():
|
||||
"""Graceful shutdown endpoint. Kills worker processes and exits."""
|
||||
"""Graceful shutdown endpoint. Kills worker processes then self-terminates.
|
||||
|
||||
Electron calls this first, waits briefly, then does PID-tree kill.
|
||||
The os._exit(3s) is a safety net in case Electron doesn't kill us.
|
||||
"""
|
||||
from app.services.parallel_coverage_service import _kill_worker_processes
|
||||
|
||||
killed = _kill_worker_processes()
|
||||
|
||||
# Schedule hard exit after response is sent
|
||||
loop = asyncio.get_event_loop()
|
||||
loop.call_later(0.5, lambda: os._exit(0))
|
||||
# Safety net: self-terminate after 3s if Electron doesn't kill us.
|
||||
# Delay is long enough for Electron to do PID-tree kill first (preferred).
|
||||
loop = asyncio.get_running_loop()
|
||||
loop.call_later(3.0, lambda: os._exit(0))
|
||||
|
||||
return {"status": "shutting down", "workers_killed": killed}
|
||||
|
||||
|
||||
@router.get("/diagnostics")
|
||||
async def get_diagnostics():
|
||||
"""Validate terrain tiles and OSM cache files.
|
||||
|
||||
Checks:
|
||||
- Terrain .hgt files: must be exactly SRTM1 or SRTM3 size
|
||||
- OSM cache .json files: must be valid JSON with expected structure
|
||||
- Cache manager stats (memory + disk)
|
||||
"""
|
||||
data_path = Path(os.environ.get('RFCP_DATA_PATH', './data'))
|
||||
terrain_path = data_path / 'terrain'
|
||||
osm_dirs = [
|
||||
data_path / 'osm' / 'buildings',
|
||||
data_path / 'osm' / 'streets',
|
||||
data_path / 'osm' / 'vegetation',
|
||||
data_path / 'osm' / 'water',
|
||||
]
|
||||
|
||||
# --- Terrain tiles ---
|
||||
terrain_tiles = []
|
||||
terrain_errors = []
|
||||
total_terrain_bytes = 0
|
||||
|
||||
if terrain_path.exists():
|
||||
for hgt in sorted(terrain_path.glob("*.hgt")):
|
||||
size = hgt.stat().st_size
|
||||
total_terrain_bytes += size
|
||||
if size == _SRTM1_SIZE:
|
||||
terrain_tiles.append({"name": hgt.name, "type": "SRTM1", "size": size})
|
||||
elif size == _SRTM3_SIZE:
|
||||
terrain_tiles.append({"name": hgt.name, "type": "SRTM3", "size": size})
|
||||
else:
|
||||
terrain_errors.append({
|
||||
"name": hgt.name,
|
||||
"size": size,
|
||||
"error": f"Invalid size (expected {_SRTM1_SIZE} or {_SRTM3_SIZE})",
|
||||
})
|
||||
|
||||
# --- OSM cache ---
|
||||
osm_files = []
|
||||
osm_errors = []
|
||||
total_osm_bytes = 0
|
||||
|
||||
for osm_dir in osm_dirs:
|
||||
if not osm_dir.exists():
|
||||
continue
|
||||
category = osm_dir.name
|
||||
for jf in sorted(osm_dir.glob("*.json")):
|
||||
fsize = jf.stat().st_size
|
||||
total_osm_bytes += fsize
|
||||
try:
|
||||
data = json.loads(jf.read_text())
|
||||
has_timestamp = '_cached_at' in data or '_ts' in data
|
||||
has_data = 'data' in data or 'v' in data
|
||||
if has_timestamp and has_data:
|
||||
osm_files.append({
|
||||
"name": jf.name,
|
||||
"category": category,
|
||||
"size": fsize,
|
||||
"valid": True,
|
||||
})
|
||||
else:
|
||||
osm_errors.append({
|
||||
"name": jf.name,
|
||||
"category": category,
|
||||
"size": fsize,
|
||||
"error": "Missing expected keys (_cached_at/data or _ts/v)",
|
||||
})
|
||||
except json.JSONDecodeError as e:
|
||||
osm_errors.append({
|
||||
"name": jf.name,
|
||||
"category": category,
|
||||
"size": fsize,
|
||||
"error": f"Invalid JSON: {e}",
|
||||
})
|
||||
|
||||
# --- Cache manager stats ---
|
||||
try:
|
||||
from app.services.cache import cache_manager
|
||||
cache_stats = cache_manager.stats()
|
||||
except Exception:
|
||||
cache_stats = None
|
||||
|
||||
return {
|
||||
"data_path": str(data_path),
|
||||
"terrain": {
|
||||
"path": str(terrain_path),
|
||||
"exists": terrain_path.exists(),
|
||||
"tile_count": len(terrain_tiles),
|
||||
"error_count": len(terrain_errors),
|
||||
"total_mb": round(total_terrain_bytes / (1024 * 1024), 1),
|
||||
"tiles": terrain_tiles,
|
||||
"errors": terrain_errors,
|
||||
},
|
||||
"osm_cache": {
|
||||
"valid_count": len(osm_files),
|
||||
"error_count": len(osm_errors),
|
||||
"total_mb": round(total_osm_bytes / (1024 * 1024), 1),
|
||||
"files": osm_files,
|
||||
"errors": osm_errors,
|
||||
},
|
||||
"cache_manager": cache_stats,
|
||||
}
|
||||
|
||||
@@ -180,3 +180,93 @@ async def get_terrain_file(region: str):
|
||||
if os.path.exists(terrain_path):
|
||||
return FileResponse(terrain_path)
|
||||
raise HTTPException(status_code=404, detail=f"Region '{region}' not found")
|
||||
|
||||
|
||||
@router.get("/status")
|
||||
async def terrain_status():
|
||||
"""Return terrain data availability info."""
|
||||
cached_tiles = terrain_service.get_cached_tiles()
|
||||
cache_size = terrain_service.get_cache_size_mb()
|
||||
|
||||
# Categorize by resolution based on file size
|
||||
srtm1_tiles = []
|
||||
srtm3_tiles = []
|
||||
for t in cached_tiles:
|
||||
tile_path = terrain_service.terrain_path / f"{t}.hgt"
|
||||
try:
|
||||
if tile_path.stat().st_size == 3601 * 3601 * 2:
|
||||
srtm1_tiles.append(t)
|
||||
else:
|
||||
srtm3_tiles.append(t)
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
return {
|
||||
"total_tiles": len(cached_tiles),
|
||||
"srtm1": {
|
||||
"count": len(srtm1_tiles),
|
||||
"resolution_m": 30,
|
||||
"tiles": sorted(srtm1_tiles),
|
||||
},
|
||||
"srtm3": {
|
||||
"count": len(srtm3_tiles),
|
||||
"resolution_m": 90,
|
||||
"tiles": sorted(srtm3_tiles),
|
||||
},
|
||||
"cache_size_mb": round(cache_size, 1),
|
||||
"memory_cached": len(terrain_service._tile_cache),
|
||||
"terra_server": "https://terra.eliah.one",
|
||||
}
|
||||
|
||||
|
||||
@router.post("/download")
|
||||
async def terrain_download(request: dict):
|
||||
"""Pre-download tiles for a region.
|
||||
|
||||
Body: {"center_lat": 48.46, "center_lon": 35.04, "radius_km": 50}
|
||||
Or: {"tiles": ["N48E034", "N48E035", "N47E034", "N47E035"]}
|
||||
"""
|
||||
if "tiles" in request:
|
||||
tile_list = request["tiles"]
|
||||
else:
|
||||
center_lat = request.get("center_lat", 48.46)
|
||||
center_lon = request.get("center_lon", 35.04)
|
||||
radius_km = request.get("radius_km", 50)
|
||||
tile_list = terrain_service.get_required_tiles(center_lat, center_lon, radius_km)
|
||||
|
||||
missing = [t for t in tile_list if not terrain_service.get_tile_path(t).exists()]
|
||||
|
||||
if not missing:
|
||||
return {"status": "ok", "message": "All tiles already cached", "count": len(tile_list)}
|
||||
|
||||
# Download missing tiles
|
||||
downloaded = []
|
||||
failed = []
|
||||
for tile_name in missing:
|
||||
success = await terrain_service.download_tile(tile_name)
|
||||
if success:
|
||||
downloaded.append(tile_name)
|
||||
else:
|
||||
failed.append(tile_name)
|
||||
|
||||
return {
|
||||
"status": "ok",
|
||||
"required": len(tile_list),
|
||||
"already_cached": len(tile_list) - len(missing),
|
||||
"downloaded": downloaded,
|
||||
"failed": failed,
|
||||
}
|
||||
|
||||
|
||||
@router.get("/index")
|
||||
async def terrain_index():
|
||||
"""Fetch tile index from terra server."""
|
||||
import httpx
|
||||
try:
|
||||
async with httpx.AsyncClient(timeout=10.0) as client:
|
||||
resp = await client.get("https://terra.eliah.one/api/index")
|
||||
if resp.status_code == 200:
|
||||
return resp.json()
|
||||
except Exception:
|
||||
pass
|
||||
return {"error": "Could not reach terra.eliah.one", "offline": True}
|
||||
|
||||
306
backend/app/api/websocket.py
Normal file
306
backend/app/api/websocket.py
Normal file
@@ -0,0 +1,306 @@
|
||||
"""
|
||||
WebSocket handler for real-time coverage calculation with progress.
|
||||
|
||||
Uses the same coverage_service pipeline as the HTTP endpoint but sends
|
||||
progress updates during computation phases.
|
||||
"""
|
||||
|
||||
import time
|
||||
import asyncio
|
||||
import logging
|
||||
from typing import Optional
|
||||
|
||||
from fastapi import WebSocket, WebSocketDisconnect
|
||||
|
||||
from app.services.coverage_service import (
|
||||
coverage_service, SiteParams, CoverageSettings, apply_preset,
|
||||
select_propagation_model,
|
||||
)
|
||||
from app.services.parallel_coverage_service import CancellationToken
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class ConnectionManager:
|
||||
"""Track cancellation tokens per calculation."""
|
||||
|
||||
def __init__(self):
|
||||
self._cancel_tokens: dict[str, CancellationToken] = {}
|
||||
|
||||
async def send_progress(
|
||||
self, ws: WebSocket, calc_id: str,
|
||||
phase: str, progress: float, eta: Optional[float] = None,
|
||||
):
|
||||
try:
|
||||
await ws.send_json({
|
||||
"type": "progress",
|
||||
"calculation_id": calc_id,
|
||||
"phase": phase,
|
||||
"progress": min(progress, 1.0),
|
||||
"eta_seconds": eta,
|
||||
})
|
||||
except Exception as e:
|
||||
logger.debug(f"[WS] send_progress failed: {e}")
|
||||
|
||||
async def send_result(self, ws: WebSocket, calc_id: str, result: dict):
|
||||
try:
|
||||
await ws.send_json({
|
||||
"type": "result",
|
||||
"calculation_id": calc_id,
|
||||
"data": result,
|
||||
})
|
||||
except Exception as e:
|
||||
logger.warning(f"[WS] send_result failed: {e}")
|
||||
|
||||
async def send_error(self, ws: WebSocket, calc_id: str, error: str):
|
||||
try:
|
||||
await ws.send_json({
|
||||
"type": "error",
|
||||
"calculation_id": calc_id,
|
||||
"message": error,
|
||||
})
|
||||
except Exception as e:
|
||||
logger.warning(f"[WS] send_error failed: {e}")
|
||||
|
||||
async def send_partial_results(
|
||||
self, ws: WebSocket, calc_id: str,
|
||||
points: list, tile_idx: int, total_tiles: int,
|
||||
):
|
||||
"""Send per-tile partial results for progressive rendering."""
|
||||
try:
|
||||
await ws.send_json({
|
||||
"type": "partial_results",
|
||||
"calculation_id": calc_id,
|
||||
"points": [p.model_dump() for p in points],
|
||||
"tile": tile_idx,
|
||||
"total_tiles": total_tiles,
|
||||
"progress": (tile_idx + 1) / total_tiles,
|
||||
})
|
||||
except Exception as e:
|
||||
logger.debug(f"[WS] send_partial_results failed: {e}")
|
||||
|
||||
|
||||
ws_manager = ConnectionManager()
|
||||
|
||||
|
||||
async def _run_calculation(ws: WebSocket, calc_id: str, data: dict):
|
||||
"""Run coverage calculation with progress updates via WebSocket."""
|
||||
cancel_token = CancellationToken()
|
||||
ws_manager._cancel_tokens[calc_id] = cancel_token
|
||||
|
||||
# Shared progress state — written by worker threads, polled by event loop.
|
||||
# Python GIL makes dict value assignment atomic for simple types.
|
||||
_progress = {"phase": "Initializing", "pct": 0.0, "seq": 0}
|
||||
_done = False
|
||||
|
||||
# Get event loop for cross-thread scheduling of WS sends.
|
||||
loop = asyncio.get_running_loop()
|
||||
_last_direct_pct = 0.0
|
||||
_last_direct_phase = ""
|
||||
|
||||
def sync_progress_fn(phase: str, pct: float, _eta: Optional[float] = None):
|
||||
"""Thread-safe progress callback — updates dict AND schedules direct WS send."""
|
||||
nonlocal _last_direct_pct, _last_direct_phase
|
||||
_progress["phase"] = phase
|
||||
_progress["pct"] = pct
|
||||
_progress["seq"] += 1
|
||||
# Schedule direct WS send via event loop (works from any thread).
|
||||
# Throttle: only send on phase change or >=2% progress.
|
||||
if phase != _last_direct_phase or pct - _last_direct_pct >= 0.02:
|
||||
_last_direct_pct = pct
|
||||
_last_direct_phase = phase
|
||||
try:
|
||||
loop.call_soon_threadsafe(
|
||||
asyncio.ensure_future,
|
||||
ws_manager.send_progress(ws, calc_id, phase, pct),
|
||||
)
|
||||
except RuntimeError:
|
||||
pass # Event loop closed
|
||||
|
||||
try:
|
||||
sites_data = data.get("sites", [])
|
||||
settings_data = data.get("settings", {})
|
||||
|
||||
if not sites_data:
|
||||
await ws_manager.send_error(ws, calc_id, "At least one site required")
|
||||
return
|
||||
|
||||
if len(sites_data) > 10:
|
||||
await ws_manager.send_error(ws, calc_id, "Maximum 10 sites per request")
|
||||
return
|
||||
|
||||
# Parse sites and settings (same format as HTTP endpoint)
|
||||
sites = [SiteParams(**s) for s in sites_data]
|
||||
settings = CoverageSettings(**settings_data)
|
||||
|
||||
if settings.radius > 50000:
|
||||
await ws_manager.send_error(ws, calc_id, "Maximum radius 50km")
|
||||
return
|
||||
if settings.resolution < 50:
|
||||
await ws_manager.send_error(ws, calc_id, "Minimum resolution 50m")
|
||||
return
|
||||
|
||||
effective_settings = apply_preset(settings.model_copy())
|
||||
|
||||
# Determine models used
|
||||
from app.api.routes.coverage import _get_active_models
|
||||
models_used = _get_active_models(effective_settings)
|
||||
env = getattr(effective_settings, 'environment', 'urban')
|
||||
primary_model = select_propagation_model(sites[0].frequency, env)
|
||||
if primary_model.name not in models_used:
|
||||
models_used.insert(0, primary_model.name)
|
||||
|
||||
await ws_manager.send_progress(ws, calc_id, "Initializing", 0.02)
|
||||
|
||||
# ── Tile callback for progressive results (large radius) ──
|
||||
async def _tile_callback(tile_points, tile_idx, total_tiles):
|
||||
await ws_manager.send_partial_results(
|
||||
ws, calc_id, tile_points, tile_idx, total_tiles,
|
||||
)
|
||||
|
||||
# ── Backup progress poller: catches anything call_soon_threadsafe missed ──
|
||||
async def progress_poller():
|
||||
last_sent_seq = 0
|
||||
last_sent_pct = 0.0
|
||||
last_sent_phase = "Initializing"
|
||||
while not _done:
|
||||
await asyncio.sleep(0.5)
|
||||
seq = _progress["seq"]
|
||||
pct = _progress["pct"]
|
||||
phase = _progress["phase"]
|
||||
# Send on any phase change OR >=3% progress (primary sends handle fine-grained)
|
||||
if seq != last_sent_seq and (
|
||||
phase != last_sent_phase
|
||||
or pct - last_sent_pct >= 0.03
|
||||
):
|
||||
await ws_manager.send_progress(ws, calc_id, phase, pct)
|
||||
last_sent_seq = seq
|
||||
last_sent_pct = pct
|
||||
last_sent_phase = phase
|
||||
|
||||
poller_task = asyncio.create_task(progress_poller())
|
||||
|
||||
# Dynamic timeout based on radius
|
||||
radius_m = settings.radius
|
||||
if radius_m > 30_000:
|
||||
calc_timeout = 600.0 # 10 min for 30-50km
|
||||
elif radius_m > 10_000:
|
||||
calc_timeout = 480.0 # 8 min for 10-30km
|
||||
else:
|
||||
calc_timeout = 300.0 # 5 min for ≤10km
|
||||
|
||||
# Run calculation with timeout
|
||||
start_time = time.time()
|
||||
try:
|
||||
if len(sites) == 1:
|
||||
points = await asyncio.wait_for(
|
||||
coverage_service.calculate_coverage(
|
||||
sites[0], settings, cancel_token,
|
||||
progress_fn=sync_progress_fn,
|
||||
tile_callback=_tile_callback,
|
||||
),
|
||||
timeout=calc_timeout,
|
||||
)
|
||||
else:
|
||||
points = await asyncio.wait_for(
|
||||
coverage_service.calculate_multi_site_coverage(
|
||||
sites, settings, cancel_token,
|
||||
progress_fn=sync_progress_fn,
|
||||
tile_callback=_tile_callback,
|
||||
),
|
||||
timeout=calc_timeout,
|
||||
)
|
||||
except asyncio.TimeoutError:
|
||||
cancel_token.cancel()
|
||||
_done = True
|
||||
await poller_task
|
||||
from app.services.parallel_coverage_service import _kill_worker_processes
|
||||
_kill_worker_processes()
|
||||
timeout_min = int(calc_timeout / 60)
|
||||
await ws_manager.send_error(ws, calc_id, f"Calculation timeout ({timeout_min} min)")
|
||||
return
|
||||
except asyncio.CancelledError:
|
||||
cancel_token.cancel()
|
||||
_done = True
|
||||
await poller_task
|
||||
await ws_manager.send_error(ws, calc_id, "Calculation cancelled")
|
||||
return
|
||||
|
||||
# Stop poller and send final progress
|
||||
_done = True
|
||||
await poller_task
|
||||
|
||||
computation_time = time.time() - start_time
|
||||
|
||||
# Build response (identical format to HTTP endpoint)
|
||||
rsrp_values = [p.rsrp for p in points]
|
||||
los_count = sum(1 for p in points if p.has_los)
|
||||
|
||||
stats = {
|
||||
"min_rsrp": min(rsrp_values) if rsrp_values else 0,
|
||||
"max_rsrp": max(rsrp_values) if rsrp_values else 0,
|
||||
"avg_rsrp": sum(rsrp_values) / len(rsrp_values) if rsrp_values else 0,
|
||||
"los_percentage": (los_count / len(points) * 100) if points else 0,
|
||||
"points_with_buildings": sum(1 for p in points if p.building_loss > 0),
|
||||
"points_with_terrain_loss": sum(1 for p in points if p.terrain_loss > 0),
|
||||
"points_with_reflection_gain": sum(1 for p in points if p.reflection_gain > 0),
|
||||
"points_with_vegetation_loss": sum(1 for p in points if p.vegetation_loss > 0),
|
||||
"points_with_rain_loss": sum(1 for p in points if p.rain_loss > 0),
|
||||
"points_with_indoor_loss": sum(1 for p in points if p.indoor_loss > 0),
|
||||
"points_with_atmospheric_loss": sum(1 for p in points if p.atmospheric_loss > 0),
|
||||
}
|
||||
|
||||
result = {
|
||||
"points": [p.model_dump() for p in points],
|
||||
"count": len(points),
|
||||
"settings": effective_settings.model_dump(),
|
||||
"stats": stats,
|
||||
"computation_time": round(computation_time, 2),
|
||||
"models_used": models_used,
|
||||
}
|
||||
|
||||
# Send "Complete" before result so frontend shows 100%
|
||||
await ws_manager.send_progress(ws, calc_id, "Complete", 1.0)
|
||||
await ws_manager.send_result(ws, calc_id, result)
|
||||
logger.info(f"[WS] calc={calc_id} done: {len(points)} pts, {computation_time:.1f}s")
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"[WS] Calculation error: {e}", exc_info=True)
|
||||
_done = True
|
||||
try:
|
||||
await poller_task
|
||||
except Exception:
|
||||
pass
|
||||
await ws_manager.send_error(ws, calc_id, str(e))
|
||||
finally:
|
||||
ws_manager._cancel_tokens.pop(calc_id, None)
|
||||
|
||||
|
||||
async def websocket_endpoint(websocket: WebSocket):
|
||||
"""WebSocket endpoint for coverage calculations with progress."""
|
||||
await websocket.accept()
|
||||
|
||||
try:
|
||||
while True:
|
||||
data = await websocket.receive_json()
|
||||
msg_type = data.get("type")
|
||||
|
||||
if msg_type == "calculate":
|
||||
calc_id = data.get("id", "")
|
||||
asyncio.create_task(_run_calculation(websocket, calc_id, data))
|
||||
|
||||
elif msg_type == "cancel":
|
||||
calc_id = data.get("id")
|
||||
token = ws_manager._cancel_tokens.get(calc_id)
|
||||
if token:
|
||||
token.cancel()
|
||||
|
||||
elif msg_type == "ping":
|
||||
await websocket.send_json({"type": "pong"})
|
||||
|
||||
except WebSocketDisconnect:
|
||||
for token in ws_manager._cancel_tokens.values():
|
||||
token.cancel()
|
||||
except Exception:
|
||||
for token in ws_manager._cancel_tokens.values():
|
||||
token.cancel()
|
||||
@@ -0,0 +1,6 @@
|
||||
"""
|
||||
Core business logic for RFCP.
|
||||
|
||||
Existing modules: config.py, database.py
|
||||
New modules: engine.py, grid.py, calculator.py, result.py
|
||||
"""
|
||||
|
||||
Binary file not shown.
103
backend/app/core/calculator.py
Normal file
103
backend/app/core/calculator.py
Normal file
@@ -0,0 +1,103 @@
|
||||
"""
|
||||
Point calculator — coordinates per-point propagation calculation.
|
||||
"""
|
||||
|
||||
import math
|
||||
from typing import Optional
|
||||
|
||||
from app.propagation.base import PropagationModel, PropagationInput
|
||||
from app.propagation.itu_r_p526 import KnifeEdgeDiffractionModel
|
||||
from app.core.result import PointResult
|
||||
|
||||
|
||||
class PointCalculator:
|
||||
"""Calculates propagation for individual grid points."""
|
||||
|
||||
def __init__(self, model: PropagationModel, environment: str = "urban"):
|
||||
self.model = model
|
||||
self.environment = environment
|
||||
self.diffraction = KnifeEdgeDiffractionModel()
|
||||
|
||||
def calculate_point(
|
||||
self,
|
||||
site_lat: float, site_lon: float, site_height: float,
|
||||
site_power: float, site_gain: float, site_frequency: float,
|
||||
point_lat: float, point_lon: float,
|
||||
distance: float,
|
||||
has_los: bool = True,
|
||||
terrain_clearance: Optional[float] = None,
|
||||
building_loss: float = 0.0,
|
||||
extra_loss: float = 0.0,
|
||||
azimuth: Optional[float] = None,
|
||||
beamwidth: float = 360,
|
||||
) -> PointResult:
|
||||
if distance < 1:
|
||||
distance = 1
|
||||
|
||||
prop_input = PropagationInput(
|
||||
frequency_mhz=site_frequency,
|
||||
distance_m=distance,
|
||||
tx_height_m=site_height,
|
||||
rx_height_m=1.5,
|
||||
environment=self.environment,
|
||||
)
|
||||
|
||||
if self.model.is_valid_for(prop_input):
|
||||
output = self.model.calculate(prop_input)
|
||||
path_loss = output.path_loss_db
|
||||
else:
|
||||
from app.propagation.free_space import FreeSpaceModel
|
||||
output = FreeSpaceModel().calculate(prop_input)
|
||||
path_loss = output.path_loss_db
|
||||
|
||||
antenna_loss = 0.0
|
||||
if azimuth is not None and beamwidth < 360:
|
||||
antenna_loss = self._antenna_pattern_loss(
|
||||
site_lat, site_lon, point_lat, point_lon, azimuth, beamwidth,
|
||||
)
|
||||
|
||||
terrain_loss = 0.0
|
||||
if terrain_clearance is not None and terrain_clearance < 0:
|
||||
terrain_loss = self.diffraction.calculate_clearance_loss(
|
||||
terrain_clearance, site_frequency,
|
||||
)
|
||||
has_los = False
|
||||
|
||||
rsrp = (
|
||||
site_power + site_gain
|
||||
- path_loss - antenna_loss
|
||||
- terrain_loss - building_loss - extra_loss
|
||||
)
|
||||
|
||||
return PointResult(
|
||||
lat=point_lat, lon=point_lon, rsrp=rsrp,
|
||||
distance=distance, path_loss=path_loss,
|
||||
terrain_loss=terrain_loss, building_loss=building_loss,
|
||||
diffraction_loss=terrain_loss, has_los=has_los,
|
||||
model_used=self.model.name,
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
def _antenna_pattern_loss(
|
||||
site_lat: float, site_lon: float,
|
||||
point_lat: float, point_lon: float,
|
||||
azimuth: float, beamwidth: float,
|
||||
) -> float:
|
||||
lat1, lon1 = math.radians(site_lat), math.radians(site_lon)
|
||||
lat2, lon2 = math.radians(point_lat), math.radians(point_lon)
|
||||
dlon = lon2 - lon1
|
||||
x = math.sin(dlon) * math.cos(lat2)
|
||||
y = math.cos(lat1) * math.sin(lat2) - math.sin(lat1) * math.cos(lat2) * math.cos(dlon)
|
||||
bearing = (math.degrees(math.atan2(x, y)) + 360) % 360
|
||||
|
||||
angle_diff = abs(bearing - azimuth)
|
||||
if angle_diff > 180:
|
||||
angle_diff = 360 - angle_diff
|
||||
|
||||
half_bw = beamwidth / 2
|
||||
if angle_diff <= half_bw:
|
||||
loss = 3 * (angle_diff / half_bw) ** 2
|
||||
else:
|
||||
loss = 3 + 12 * ((angle_diff - half_bw) / half_bw) ** 2
|
||||
loss = min(loss, 25)
|
||||
return loss
|
||||
240
backend/app/core/engine.py
Normal file
240
backend/app/core/engine.py
Normal file
@@ -0,0 +1,240 @@
|
||||
"""
|
||||
CoverageEngine — main orchestrator for coverage calculations.
|
||||
|
||||
Coordinates data loading, model selection, parallel computation,
|
||||
and result aggregation. Does NOT implement propagation physics
|
||||
(delegated to models) or handle HTTP (delegated to API layer).
|
||||
"""
|
||||
|
||||
import time
|
||||
import asyncio
|
||||
from enum import Enum
|
||||
from dataclasses import dataclass
|
||||
from typing import List, Optional, Callable, Awaitable
|
||||
|
||||
from app.propagation.base import PropagationModel, PropagationInput
|
||||
from app.propagation.free_space import FreeSpaceModel
|
||||
from app.propagation.okumura_hata import OkumuraHataModel
|
||||
from app.propagation.cost231_hata import Cost231HataModel
|
||||
from app.propagation.cost231_wi import Cost231WIModel
|
||||
from app.propagation.itu_r_p1546 import ITUR_P1546Model
|
||||
from app.propagation.longley_rice import LongleyRiceModel
|
||||
from app.propagation.itu_r_p526 import KnifeEdgeDiffractionModel
|
||||
|
||||
from app.core.result import CoverageResult, PointResult, compute_stats
|
||||
|
||||
|
||||
class BandType(Enum):
|
||||
LTE = "lte" # 700-2600 MHz
|
||||
UHF = "uhf" # 400-520 MHz
|
||||
VHF = "vhf" # 136-174 MHz
|
||||
CUSTOM = "custom" # User-defined
|
||||
|
||||
|
||||
class PresetType(Enum):
|
||||
FAST = "fast"
|
||||
STANDARD = "standard"
|
||||
DETAILED = "detailed"
|
||||
FULL = "full"
|
||||
|
||||
|
||||
@dataclass
|
||||
class Site:
|
||||
id: str
|
||||
lat: float
|
||||
lon: float
|
||||
height: float # meters AGL
|
||||
power: float # dBm
|
||||
gain: float # dBi
|
||||
frequency: float # MHz
|
||||
band_type: BandType = BandType.LTE
|
||||
azimuth: Optional[float] = None
|
||||
beamwidth: float = 65
|
||||
tilt: float = 0
|
||||
environment: str = "urban"
|
||||
|
||||
|
||||
@dataclass
|
||||
class CoverageSettings:
|
||||
radius: float = 10000
|
||||
resolution: float = 200
|
||||
min_signal: float = -120
|
||||
preset: PresetType = PresetType.STANDARD
|
||||
band_type: BandType = BandType.LTE
|
||||
environment: str = "urban"
|
||||
|
||||
terrain_enabled: bool = True
|
||||
buildings_enabled: bool = True
|
||||
diffraction_enabled: bool = True
|
||||
reflection_enabled: bool = False
|
||||
|
||||
# Legacy toggles (backward compat)
|
||||
use_terrain: bool = True
|
||||
use_buildings: bool = True
|
||||
use_materials: bool = True
|
||||
use_dominant_path: bool = False
|
||||
use_street_canyon: bool = False
|
||||
use_reflections: bool = False
|
||||
use_water_reflection: bool = False
|
||||
use_vegetation: bool = False
|
||||
season: str = "summer"
|
||||
rain_rate: float = 0.0
|
||||
indoor_loss_type: str = "none"
|
||||
use_atmospheric: bool = False
|
||||
temperature_c: float = 15.0
|
||||
humidity_percent: float = 50.0
|
||||
|
||||
|
||||
ProgressCallback = Callable[[str, float, Optional[float]], Awaitable[None]]
|
||||
|
||||
|
||||
class CoverageEngine:
|
||||
"""
|
||||
Main orchestrator for coverage calculations.
|
||||
|
||||
Selects the appropriate propagation model based on band type
|
||||
and environment, then delegates to the existing coverage pipeline.
|
||||
"""
|
||||
|
||||
_model_registry = {
|
||||
(BandType.LTE, "urban"): Cost231HataModel,
|
||||
(BandType.LTE, "suburban"): OkumuraHataModel,
|
||||
(BandType.LTE, "rural"): OkumuraHataModel,
|
||||
(BandType.LTE, "open"): FreeSpaceModel,
|
||||
(BandType.UHF, "urban"): OkumuraHataModel,
|
||||
(BandType.UHF, "suburban"): OkumuraHataModel,
|
||||
(BandType.UHF, "rural"): LongleyRiceModel,
|
||||
(BandType.VHF, "urban"): ITUR_P1546Model,
|
||||
(BandType.VHF, "suburban"): ITUR_P1546Model,
|
||||
(BandType.VHF, "rural"): LongleyRiceModel,
|
||||
}
|
||||
|
||||
def __init__(self):
|
||||
self._models = {}
|
||||
self._init_models()
|
||||
self.free_space = FreeSpaceModel()
|
||||
self.diffraction = KnifeEdgeDiffractionModel()
|
||||
|
||||
def _init_models(self):
|
||||
for key, model_cls in self._model_registry.items():
|
||||
self._models[key] = model_cls()
|
||||
|
||||
def select_model(self, band: BandType, environment: str) -> PropagationModel:
|
||||
key = (band, environment)
|
||||
if key in self._models:
|
||||
return self._models[key]
|
||||
if (band, "urban") in self._models:
|
||||
return self._models[(band, "urban")]
|
||||
return OkumuraHataModel()
|
||||
|
||||
def get_available_models(self) -> dict:
|
||||
models = {}
|
||||
seen = set()
|
||||
for (band, env), model in self._models.items():
|
||||
if model.name not in seen:
|
||||
seen.add(model.name)
|
||||
models[model.name] = {
|
||||
"frequency_range": model.frequency_range,
|
||||
"distance_range": model.distance_range,
|
||||
"bands": [],
|
||||
}
|
||||
models[model.name]["bands"].append(f"{band.value}/{env}")
|
||||
return models
|
||||
|
||||
async def calculate(
|
||||
self,
|
||||
sites: List[Site],
|
||||
settings: CoverageSettings,
|
||||
progress_callback: Optional[ProgressCallback] = None,
|
||||
) -> CoverageResult:
|
||||
"""
|
||||
Main calculation entry point.
|
||||
|
||||
Delegates actual per-point work to the legacy coverage_service
|
||||
pipeline, wrapping it with the new clean interface.
|
||||
"""
|
||||
start_time = time.time()
|
||||
model = self.select_model(settings.band_type, settings.environment)
|
||||
|
||||
if progress_callback:
|
||||
await progress_callback("init", 0.05, None)
|
||||
|
||||
# Import legacy system
|
||||
from app.services.coverage_service import (
|
||||
coverage_service, SiteParams,
|
||||
CoverageSettings as LegacySettings,
|
||||
)
|
||||
from app.services.parallel_coverage_service import CancellationToken
|
||||
|
||||
legacy_settings = LegacySettings(
|
||||
radius=settings.radius,
|
||||
resolution=settings.resolution,
|
||||
min_signal=settings.min_signal,
|
||||
use_terrain=settings.use_terrain,
|
||||
use_buildings=settings.use_buildings,
|
||||
use_materials=settings.use_materials,
|
||||
use_dominant_path=settings.use_dominant_path,
|
||||
use_street_canyon=settings.use_street_canyon,
|
||||
use_reflections=settings.use_reflections,
|
||||
use_water_reflection=settings.use_water_reflection,
|
||||
use_vegetation=settings.use_vegetation,
|
||||
season=settings.season,
|
||||
rain_rate=settings.rain_rate,
|
||||
indoor_loss_type=settings.indoor_loss_type,
|
||||
use_atmospheric=settings.use_atmospheric,
|
||||
temperature_c=settings.temperature_c,
|
||||
humidity_percent=settings.humidity_percent,
|
||||
preset=settings.preset.value if isinstance(settings.preset, PresetType) else settings.preset,
|
||||
)
|
||||
|
||||
cancel_token = CancellationToken()
|
||||
|
||||
if progress_callback:
|
||||
await progress_callback("calculating", 0.25, None)
|
||||
|
||||
legacy_sites = [
|
||||
SiteParams(
|
||||
lat=s.lat, lon=s.lon, height=s.height,
|
||||
power=s.power, gain=s.gain, frequency=s.frequency,
|
||||
azimuth=s.azimuth, beamwidth=s.beamwidth,
|
||||
)
|
||||
for s in sites
|
||||
]
|
||||
|
||||
if len(legacy_sites) == 1:
|
||||
points = await coverage_service.calculate_coverage(
|
||||
legacy_sites[0], legacy_settings, cancel_token,
|
||||
)
|
||||
else:
|
||||
points = await coverage_service.calculate_multi_site_coverage(
|
||||
legacy_sites, legacy_settings, cancel_token,
|
||||
)
|
||||
|
||||
if progress_callback:
|
||||
await progress_callback("done", 1.0, None)
|
||||
|
||||
result_points = [
|
||||
PointResult(
|
||||
lat=p.lat, lon=p.lon, rsrp=p.rsrp,
|
||||
distance=p.distance, path_loss=0.0,
|
||||
terrain_loss=p.terrain_loss,
|
||||
building_loss=p.building_loss,
|
||||
diffraction_loss=0.0,
|
||||
has_los=p.has_los,
|
||||
model_used=model.name,
|
||||
)
|
||||
for p in points
|
||||
]
|
||||
|
||||
computation_time = time.time() - start_time
|
||||
|
||||
return CoverageResult(
|
||||
points=result_points,
|
||||
stats=compute_stats(result_points),
|
||||
computation_time=computation_time,
|
||||
models_used=[model.name],
|
||||
)
|
||||
|
||||
|
||||
# Singleton
|
||||
engine = CoverageEngine()
|
||||
83
backend/app/core/grid.py
Normal file
83
backend/app/core/grid.py
Normal file
@@ -0,0 +1,83 @@
|
||||
"""
|
||||
Grid generation for coverage calculations.
|
||||
"""
|
||||
|
||||
import numpy as np
|
||||
from dataclasses import dataclass
|
||||
from typing import List, Tuple
|
||||
from app.geometry.haversine import haversine_distance
|
||||
|
||||
|
||||
@dataclass
|
||||
class BoundingBox:
|
||||
min_lat: float
|
||||
min_lon: float
|
||||
max_lat: float
|
||||
max_lon: float
|
||||
|
||||
|
||||
@dataclass
|
||||
class Grid:
|
||||
points: List[Tuple[float, float]]
|
||||
bounding_box: BoundingBox
|
||||
resolution: float
|
||||
radius: float
|
||||
|
||||
|
||||
class GridService:
|
||||
"""Generate coverage grid points."""
|
||||
|
||||
@staticmethod
|
||||
def generate(
|
||||
center_lat: float,
|
||||
center_lon: float,
|
||||
radius: float,
|
||||
resolution: float,
|
||||
) -> Grid:
|
||||
points = []
|
||||
|
||||
lat_step = resolution / 111000
|
||||
lon_step = resolution / (111000 * np.cos(np.radians(center_lat)))
|
||||
|
||||
lat_delta = radius / 111000
|
||||
lon_delta = radius / (111000 * np.cos(np.radians(center_lat)))
|
||||
|
||||
bbox = BoundingBox(
|
||||
min_lat=center_lat - lat_delta,
|
||||
min_lon=center_lon - lon_delta,
|
||||
max_lat=center_lat + lat_delta,
|
||||
max_lon=center_lon + lon_delta,
|
||||
)
|
||||
|
||||
lat = center_lat - lat_delta
|
||||
while lat <= center_lat + lat_delta:
|
||||
lon = center_lon - lon_delta
|
||||
while lon <= center_lon + lon_delta:
|
||||
dist = haversine_distance(center_lat, center_lon, lat, lon)
|
||||
if dist <= radius:
|
||||
points.append((lat, lon))
|
||||
lon += lon_step
|
||||
lat += lat_step
|
||||
|
||||
return Grid(points=points, bounding_box=bbox, resolution=resolution, radius=radius)
|
||||
|
||||
@staticmethod
|
||||
def generate_multi_site(sites: list, radius: float, resolution: float) -> Grid:
|
||||
all_points = set()
|
||||
min_lat = min_lon = float("inf")
|
||||
max_lat = max_lon = float("-inf")
|
||||
|
||||
for site in sites:
|
||||
grid = GridService.generate(site.lat, site.lon, radius, resolution)
|
||||
for p in grid.points:
|
||||
all_points.add((round(p[0], 7), round(p[1], 7)))
|
||||
min_lat = min(min_lat, grid.bounding_box.min_lat)
|
||||
min_lon = min(min_lon, grid.bounding_box.min_lon)
|
||||
max_lat = max(max_lat, grid.bounding_box.max_lat)
|
||||
max_lon = max(max_lon, grid.bounding_box.max_lon)
|
||||
|
||||
return Grid(
|
||||
points=list(all_points),
|
||||
bounding_box=BoundingBox(min_lat, min_lon, max_lat, max_lon),
|
||||
resolution=resolution, radius=radius,
|
||||
)
|
||||
65
backend/app/core/result.py
Normal file
65
backend/app/core/result.py
Normal file
@@ -0,0 +1,65 @@
|
||||
"""
|
||||
Coverage result aggregation and statistics.
|
||||
"""
|
||||
|
||||
from dataclasses import dataclass
|
||||
from typing import List
|
||||
|
||||
|
||||
@dataclass
|
||||
class PointResult:
|
||||
lat: float
|
||||
lon: float
|
||||
rsrp: float
|
||||
distance: float
|
||||
path_loss: float
|
||||
terrain_loss: float
|
||||
building_loss: float
|
||||
diffraction_loss: float
|
||||
has_los: bool
|
||||
model_used: str
|
||||
|
||||
def to_dict(self) -> dict:
|
||||
return {
|
||||
"lat": self.lat, "lon": self.lon,
|
||||
"rsrp": self.rsrp, "distance": self.distance,
|
||||
"path_loss": self.path_loss, "terrain_loss": self.terrain_loss,
|
||||
"building_loss": self.building_loss, "diffraction_loss": self.diffraction_loss,
|
||||
"has_los": self.has_los, "model_used": self.model_used,
|
||||
}
|
||||
|
||||
|
||||
@dataclass
|
||||
class CoverageResult:
|
||||
points: List[PointResult]
|
||||
stats: dict
|
||||
computation_time: float
|
||||
models_used: List[str]
|
||||
|
||||
def to_dict(self) -> dict:
|
||||
return {
|
||||
"points": [p.to_dict() for p in self.points],
|
||||
"count": len(self.points),
|
||||
"stats": self.stats,
|
||||
"computation_time": round(self.computation_time, 2),
|
||||
"models_used": self.models_used,
|
||||
}
|
||||
|
||||
|
||||
def compute_stats(points: List[PointResult]) -> dict:
|
||||
if not points:
|
||||
return {"min_rsrp": 0, "max_rsrp": 0, "avg_rsrp": 0,
|
||||
"los_percentage": 0, "total_points": 0}
|
||||
|
||||
rsrp_values = [p.rsrp for p in points]
|
||||
los_count = sum(1 for p in points if p.has_los)
|
||||
|
||||
return {
|
||||
"min_rsrp": min(rsrp_values),
|
||||
"max_rsrp": max(rsrp_values),
|
||||
"avg_rsrp": sum(rsrp_values) / len(rsrp_values),
|
||||
"los_percentage": los_count / len(points) * 100,
|
||||
"total_points": len(points),
|
||||
"points_with_buildings": sum(1 for p in points if p.building_loss > 0),
|
||||
"points_with_terrain_loss": sum(1 for p in points if p.terrain_loss > 0),
|
||||
}
|
||||
38
backend/app/geometry/__init__.py
Normal file
38
backend/app/geometry/__init__.py
Normal file
@@ -0,0 +1,38 @@
|
||||
"""
|
||||
Geometry operations for RF propagation calculations.
|
||||
|
||||
NumPy-dependent modules (haversine, intersection, reflection) are
|
||||
imported lazily so pure-Python modules (diffraction, los) remain
|
||||
available even when NumPy is not installed.
|
||||
"""
|
||||
|
||||
from app.geometry.diffraction import knife_edge_loss
|
||||
from app.geometry.los import check_los_terrain, fresnel_radius
|
||||
|
||||
|
||||
def __getattr__(name):
|
||||
"""Lazy import for NumPy-dependent geometry functions."""
|
||||
_numpy_exports = {
|
||||
"haversine_distance", "haversine_batch", "points_to_local_coords",
|
||||
"line_segments_intersect_batch", "line_intersects_polygons_batch",
|
||||
"calculate_reflection_points_batch", "find_best_reflection_path",
|
||||
}
|
||||
if name in _numpy_exports:
|
||||
if name in ("haversine_distance", "haversine_batch", "points_to_local_coords"):
|
||||
from app.geometry.haversine import haversine_distance, haversine_batch, points_to_local_coords
|
||||
return locals()[name]
|
||||
elif name in ("line_segments_intersect_batch", "line_intersects_polygons_batch"):
|
||||
from app.geometry.intersection import line_segments_intersect_batch, line_intersects_polygons_batch
|
||||
return locals()[name]
|
||||
elif name in ("calculate_reflection_points_batch", "find_best_reflection_path"):
|
||||
from app.geometry.reflection import calculate_reflection_points_batch, find_best_reflection_path
|
||||
return locals()[name]
|
||||
raise AttributeError(f"module 'app.geometry' has no attribute {name!r}")
|
||||
|
||||
|
||||
__all__ = [
|
||||
"haversine_distance", "haversine_batch", "points_to_local_coords",
|
||||
"line_segments_intersect_batch", "line_intersects_polygons_batch",
|
||||
"calculate_reflection_points_batch", "find_best_reflection_path",
|
||||
"knife_edge_loss", "check_los_terrain", "fresnel_radius",
|
||||
]
|
||||
40
backend/app/geometry/diffraction.py
Normal file
40
backend/app/geometry/diffraction.py
Normal file
@@ -0,0 +1,40 @@
|
||||
"""
|
||||
Knife-edge diffraction geometry calculations.
|
||||
"""
|
||||
|
||||
import math
|
||||
|
||||
|
||||
def knife_edge_loss(
|
||||
d1_m: float,
|
||||
d2_m: float,
|
||||
h_m: float,
|
||||
wavelength_m: float,
|
||||
) -> float:
|
||||
"""
|
||||
Calculate diffraction loss over single knife edge.
|
||||
|
||||
Args:
|
||||
d1_m: Distance TX to obstacle
|
||||
d2_m: Distance obstacle to RX
|
||||
h_m: Obstacle height above LOS (positive = above)
|
||||
wavelength_m: Signal wavelength
|
||||
|
||||
Returns:
|
||||
Loss in dB (>= 0)
|
||||
"""
|
||||
if d1_m <= 0 or d2_m <= 0 or wavelength_m <= 0:
|
||||
return 0.0
|
||||
|
||||
v = h_m * math.sqrt(2 * (d1_m + d2_m) / (wavelength_m * d1_m * d2_m))
|
||||
|
||||
if v < -0.78:
|
||||
L = 0.0
|
||||
elif v < 0:
|
||||
L = 6.02 + 9.11 * v - 1.27 * v ** 2
|
||||
elif v < 2.4:
|
||||
L = 6.02 + 9.11 * v + 1.65 * v ** 2
|
||||
else:
|
||||
L = 12.95 + 20 * math.log10(v)
|
||||
|
||||
return max(0.0, L)
|
||||
50
backend/app/geometry/haversine.py
Normal file
50
backend/app/geometry/haversine.py
Normal file
@@ -0,0 +1,50 @@
|
||||
"""
|
||||
Distance calculations using the haversine formula.
|
||||
|
||||
Supports both scalar and batch (NumPy array) operations.
|
||||
"""
|
||||
|
||||
import numpy as np
|
||||
from typing import Tuple
|
||||
|
||||
EARTH_RADIUS = 6371000 # meters
|
||||
|
||||
|
||||
def haversine_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
|
||||
"""Calculate distance between two points in meters."""
|
||||
lat1, lon1, lat2, lon2 = map(np.radians, [lat1, lon1, lat2, lon2])
|
||||
dlat = lat2 - lat1
|
||||
dlon = lon2 - lon1
|
||||
a = np.sin(dlat / 2) ** 2 + np.cos(lat1) * np.cos(lat2) * np.sin(dlon / 2) ** 2
|
||||
c = 2 * np.arcsin(np.sqrt(a))
|
||||
return float(EARTH_RADIUS * c)
|
||||
|
||||
|
||||
def haversine_batch(
|
||||
lat1: float, lon1: float,
|
||||
lats2: np.ndarray, lons2: np.ndarray,
|
||||
) -> np.ndarray:
|
||||
"""Distance from one point to many points (meters)."""
|
||||
lat1_rad = np.radians(lat1)
|
||||
lon1_rad = np.radians(lon1)
|
||||
lats2_rad = np.radians(lats2)
|
||||
lons2_rad = np.radians(lons2)
|
||||
|
||||
dlat = lats2_rad - lat1_rad
|
||||
dlon = lons2_rad - lon1_rad
|
||||
|
||||
a = np.sin(dlat / 2) ** 2 + np.cos(lat1_rad) * np.cos(lats2_rad) * np.sin(dlon / 2) ** 2
|
||||
c = 2 * np.arcsin(np.sqrt(a))
|
||||
|
||||
return EARTH_RADIUS * c
|
||||
|
||||
|
||||
def points_to_local_coords(
|
||||
ref_lat: float, ref_lon: float,
|
||||
lats: np.ndarray, lons: np.ndarray,
|
||||
) -> Tuple[np.ndarray, np.ndarray]:
|
||||
"""Convert lat/lon to local X/Y meters (equirectangular projection)."""
|
||||
cos_lat = np.cos(np.radians(ref_lat))
|
||||
x = (lons - ref_lon) * 111320.0 * cos_lat
|
||||
y = (lats - ref_lat) * 110540.0
|
||||
return x, y
|
||||
116
backend/app/geometry/intersection.py
Normal file
116
backend/app/geometry/intersection.py
Normal file
@@ -0,0 +1,116 @@
|
||||
"""
|
||||
Vectorized line-segment and line-polygon intersection checks.
|
||||
|
||||
All operations use NumPy for batch processing.
|
||||
"""
|
||||
|
||||
import numpy as np
|
||||
from typing import Tuple
|
||||
|
||||
|
||||
def line_segments_intersect_batch(
|
||||
p1: np.ndarray, p2: np.ndarray,
|
||||
segments_start: np.ndarray, segments_end: np.ndarray,
|
||||
) -> Tuple[np.ndarray, np.ndarray]:
|
||||
"""Check if line p1->p2 intersects with N segments.
|
||||
|
||||
Args:
|
||||
p1, p2: shape (2,)
|
||||
segments_start, segments_end: shape (N, 2)
|
||||
|
||||
Returns:
|
||||
intersects: bool array (N,)
|
||||
t_values: parameter along p1->p2 (N,)
|
||||
"""
|
||||
d = p2 - p1
|
||||
seg_d = segments_end - segments_start
|
||||
|
||||
cross = d[0] * seg_d[:, 1] - d[1] * seg_d[:, 0]
|
||||
parallel_mask = np.abs(cross) < 1e-10
|
||||
cross_safe = np.where(parallel_mask, 1.0, cross)
|
||||
|
||||
dp = p1 - segments_start
|
||||
t = (dp[:, 0] * seg_d[:, 1] - dp[:, 1] * seg_d[:, 0]) / cross_safe
|
||||
u = (dp[:, 0] * d[1] - dp[:, 1] * d[0]) / cross_safe
|
||||
|
||||
intersects = ~parallel_mask & (t >= 0) & (t <= 1) & (u >= 0) & (u <= 1)
|
||||
return intersects, t
|
||||
|
||||
|
||||
def line_intersects_polygons_batch(
|
||||
p1: np.ndarray, p2: np.ndarray,
|
||||
polygons_x: np.ndarray, polygons_y: np.ndarray,
|
||||
polygon_lengths: np.ndarray,
|
||||
max_polygons: int = 30,
|
||||
) -> Tuple[np.ndarray, np.ndarray]:
|
||||
"""Check if line p1->p2 intersects multiple polygons.
|
||||
|
||||
Uses bounding-box pre-filter to limit work when polygon count is large.
|
||||
|
||||
Args:
|
||||
p1, p2: shape (2,)
|
||||
polygons_x, polygons_y: flattened vertex arrays
|
||||
polygon_lengths: vertices per polygon (num_polygons,)
|
||||
max_polygons: only check nearest N polygons
|
||||
|
||||
Returns:
|
||||
intersects: bool (num_polygons,)
|
||||
min_distances: distance to first hit (num_polygons,)
|
||||
"""
|
||||
num_polygons = len(polygon_lengths)
|
||||
|
||||
if num_polygons == 0:
|
||||
return np.array([], dtype=bool), np.array([])
|
||||
|
||||
intersects = np.zeros(num_polygons, dtype=bool)
|
||||
min_t = np.full(num_polygons, np.inf)
|
||||
|
||||
# Pre-filter: bounding box check
|
||||
if num_polygons > max_polygons:
|
||||
buf = 50.0
|
||||
line_min_x = min(p1[0], p2[0]) - buf
|
||||
line_max_x = max(p1[0], p2[0]) + buf
|
||||
line_min_y = min(p1[1], p2[1]) - buf
|
||||
line_max_y = max(p1[1], p2[1]) + buf
|
||||
|
||||
nearby_mask = np.zeros(num_polygons, dtype=bool)
|
||||
vi = 0
|
||||
for i, length in enumerate(polygon_lengths):
|
||||
if length >= 3:
|
||||
cx = polygons_x[vi]
|
||||
cy = polygons_y[vi]
|
||||
if line_min_x <= cx <= line_max_x and line_min_y <= cy <= line_max_y:
|
||||
nearby_mask[i] = True
|
||||
vi += length
|
||||
|
||||
nearby_indices = np.where(nearby_mask)[0]
|
||||
if len(nearby_indices) > max_polygons:
|
||||
nearby_mask = np.zeros(num_polygons, dtype=bool)
|
||||
nearby_mask[nearby_indices[:max_polygons]] = True
|
||||
else:
|
||||
nearby_mask = np.ones(num_polygons, dtype=bool)
|
||||
|
||||
idx = 0
|
||||
for i, length in enumerate(polygon_lengths):
|
||||
if length < 3 or not nearby_mask[i]:
|
||||
idx += length
|
||||
continue
|
||||
|
||||
px = polygons_x[idx:idx + length]
|
||||
py = polygons_y[idx:idx + length]
|
||||
|
||||
starts = np.stack([px, py], axis=1)
|
||||
ends = np.stack([np.roll(px, -1), np.roll(py, -1)], axis=1)
|
||||
|
||||
edge_intersects, t_vals = line_segments_intersect_batch(p1, p2, starts, ends)
|
||||
|
||||
if np.any(edge_intersects):
|
||||
intersects[i] = True
|
||||
min_t[i] = np.min(t_vals[edge_intersects])
|
||||
|
||||
idx += length
|
||||
|
||||
line_length = np.linalg.norm(p2 - p1)
|
||||
min_distances = min_t * line_length
|
||||
|
||||
return intersects, min_distances
|
||||
85
backend/app/geometry/los.py
Normal file
85
backend/app/geometry/los.py
Normal file
@@ -0,0 +1,85 @@
|
||||
"""
|
||||
Line-of-sight checks using terrain profile data.
|
||||
"""
|
||||
|
||||
import math
|
||||
from typing import Optional, Dict, List
|
||||
|
||||
EARTH_RADIUS = 6371000
|
||||
K_FACTOR = 4 / 3 # Standard atmospheric refraction
|
||||
|
||||
|
||||
def check_los_terrain(
|
||||
profile: List[dict],
|
||||
tx_height: float,
|
||||
rx_height: float,
|
||||
) -> dict:
|
||||
"""
|
||||
Check line-of-sight from a terrain elevation profile.
|
||||
|
||||
Args:
|
||||
profile: List of dicts with 'elevation' and 'distance' keys.
|
||||
tx_height: TX antenna height above ground (meters).
|
||||
rx_height: RX height above ground (meters).
|
||||
|
||||
Returns:
|
||||
dict with has_los, clearance, blocked_at
|
||||
"""
|
||||
if not profile:
|
||||
return {"has_los": True, "clearance": 0.0, "blocked_at": None}
|
||||
|
||||
tx_ground = profile[0]["elevation"]
|
||||
rx_ground = profile[-1]["elevation"]
|
||||
tx_total = tx_ground + tx_height
|
||||
rx_total = rx_ground + rx_height
|
||||
total_distance = profile[-1]["distance"]
|
||||
|
||||
min_clearance = float("inf")
|
||||
blocked_at = None
|
||||
|
||||
for point in profile:
|
||||
d = point["distance"]
|
||||
terrain_elev = point["elevation"]
|
||||
|
||||
if total_distance == 0:
|
||||
los_height = tx_total
|
||||
else:
|
||||
los_height = tx_total + (rx_total - tx_total) * (d / total_distance)
|
||||
|
||||
# Earth curvature correction
|
||||
effective_radius = K_FACTOR * EARTH_RADIUS
|
||||
curvature = (d * (total_distance - d)) / (2 * effective_radius)
|
||||
los_height_corrected = los_height - curvature
|
||||
|
||||
clearance = los_height_corrected - terrain_elev
|
||||
|
||||
if clearance < min_clearance:
|
||||
min_clearance = clearance
|
||||
if clearance <= 0:
|
||||
blocked_at = d
|
||||
|
||||
return {
|
||||
"has_los": min_clearance > 0,
|
||||
"clearance": min_clearance,
|
||||
"blocked_at": blocked_at,
|
||||
}
|
||||
|
||||
|
||||
def fresnel_radius(
|
||||
d1_m: float, d2_m: float, wavelength_m: float, zone: int = 1
|
||||
) -> float:
|
||||
"""Calculate Fresnel zone radius at a point along the path.
|
||||
|
||||
Args:
|
||||
d1_m: Distance from TX to point
|
||||
d2_m: Distance from point to RX
|
||||
wavelength_m: Signal wavelength
|
||||
zone: Fresnel zone number (default 1)
|
||||
|
||||
Returns:
|
||||
Radius in meters
|
||||
"""
|
||||
total = d1_m + d2_m
|
||||
if total <= 0:
|
||||
return 0.0
|
||||
return math.sqrt(zone * wavelength_m * d1_m * d2_m / total)
|
||||
163
backend/app/geometry/reflection.py
Normal file
163
backend/app/geometry/reflection.py
Normal file
@@ -0,0 +1,163 @@
|
||||
"""
|
||||
Vectorized reflection point calculations using mirror-image method.
|
||||
"""
|
||||
|
||||
import numpy as np
|
||||
from typing import Tuple, Optional
|
||||
from app.geometry.intersection import line_intersects_polygons_batch
|
||||
|
||||
|
||||
def calculate_reflection_points_batch(
|
||||
tx: np.ndarray, rx: np.ndarray,
|
||||
wall_starts: np.ndarray, wall_ends: np.ndarray,
|
||||
) -> Tuple[np.ndarray, np.ndarray]:
|
||||
"""Calculate reflection points on N walls via mirror-image method.
|
||||
|
||||
Args:
|
||||
tx, rx: shape (2,)
|
||||
wall_starts, wall_ends: shape (N, 2)
|
||||
|
||||
Returns:
|
||||
reflection_points: (N, 2)
|
||||
valid: bool (N,)
|
||||
"""
|
||||
wall_vec = wall_ends - wall_starts
|
||||
wall_length = np.linalg.norm(wall_vec, axis=1, keepdims=True)
|
||||
wall_unit = wall_vec / np.maximum(wall_length, 1e-10)
|
||||
|
||||
normals = np.stack([-wall_unit[:, 1], wall_unit[:, 0]], axis=1)
|
||||
|
||||
tx_to_wall = tx - wall_starts
|
||||
tx_dist_to_wall = np.sum(tx_to_wall * normals, axis=1, keepdims=True)
|
||||
tx_mirror = tx - 2 * tx_dist_to_wall * normals
|
||||
|
||||
rx_to_mirror = tx_mirror - rx
|
||||
|
||||
cross_denom = (rx_to_mirror[:, 0] * wall_vec[:, 1] -
|
||||
rx_to_mirror[:, 1] * wall_vec[:, 0])
|
||||
|
||||
valid_denom = np.abs(cross_denom) > 1e-10
|
||||
cross_denom_safe = np.where(valid_denom, cross_denom, 1.0)
|
||||
|
||||
rx_to_start = wall_starts - rx
|
||||
t = (rx_to_start[:, 0] * rx_to_mirror[:, 1] -
|
||||
rx_to_start[:, 1] * rx_to_mirror[:, 0]) / cross_denom_safe
|
||||
|
||||
reflection_points = wall_starts + t[:, np.newaxis] * wall_vec
|
||||
|
||||
valid = valid_denom & (t >= 0) & (t <= 1) & (tx_dist_to_wall[:, 0] > 0)
|
||||
|
||||
return reflection_points, valid
|
||||
|
||||
|
||||
def find_best_reflection_path(
|
||||
tx: np.ndarray, rx: np.ndarray,
|
||||
building_walls_start: np.ndarray,
|
||||
building_walls_end: np.ndarray,
|
||||
wall_to_building: np.ndarray,
|
||||
obstacle_polygons_x: np.ndarray,
|
||||
obstacle_polygons_y: np.ndarray,
|
||||
obstacle_lengths: np.ndarray,
|
||||
max_candidates: int = 50,
|
||||
max_walls: int = 100,
|
||||
max_los_checks: int = 10,
|
||||
) -> Tuple[Optional[np.ndarray], float, float]:
|
||||
"""Find best single-reflection path using vectorized ops.
|
||||
|
||||
Args:
|
||||
max_walls: Only consider closest N walls for reflection candidates.
|
||||
max_los_checks: Only verify LOS for top N shortest reflection paths.
|
||||
|
||||
Returns:
|
||||
best_reflection_point: (2,) or None
|
||||
best_path_length: meters
|
||||
best_reflection_loss: dB
|
||||
"""
|
||||
num_walls = len(building_walls_start)
|
||||
if num_walls == 0:
|
||||
return None, np.inf, 0.0
|
||||
|
||||
# Limit walls by distance to path midpoint
|
||||
if num_walls > max_walls:
|
||||
midpoint = (tx + rx) / 2
|
||||
wall_midpoints = (building_walls_start + building_walls_end) / 2
|
||||
wall_distances = np.linalg.norm(wall_midpoints - midpoint, axis=1)
|
||||
closest = np.argpartition(wall_distances, max_walls)[:max_walls]
|
||||
building_walls_start = building_walls_start[closest]
|
||||
building_walls_end = building_walls_end[closest]
|
||||
wall_to_building = wall_to_building[closest]
|
||||
|
||||
refl_points, valid = calculate_reflection_points_batch(
|
||||
tx, rx, building_walls_start, building_walls_end,
|
||||
)
|
||||
|
||||
if not np.any(valid):
|
||||
return None, np.inf, 0.0
|
||||
|
||||
valid_indices = np.where(valid)[0]
|
||||
valid_refl = refl_points[valid]
|
||||
|
||||
tx_to_refl = np.linalg.norm(valid_refl - tx, axis=1)
|
||||
refl_to_rx = np.linalg.norm(rx - valid_refl, axis=1)
|
||||
path_lengths = tx_to_refl + refl_to_rx
|
||||
|
||||
# Direct distance filter
|
||||
direct_dist = np.linalg.norm(rx - tx)
|
||||
within_range = path_lengths <= direct_dist * 2.0
|
||||
if not np.any(within_range):
|
||||
return None, np.inf, 0.0
|
||||
|
||||
valid_indices = valid_indices[within_range]
|
||||
valid_refl = valid_refl[within_range]
|
||||
path_lengths = path_lengths[within_range]
|
||||
|
||||
# Keep top candidates by shortest path
|
||||
if len(valid_indices) > max_candidates:
|
||||
top_idx = np.argpartition(path_lengths, max_candidates)[:max_candidates]
|
||||
valid_indices = valid_indices[top_idx]
|
||||
valid_refl = valid_refl[top_idx]
|
||||
path_lengths = path_lengths[top_idx]
|
||||
|
||||
# Sort by path length for early exit
|
||||
sort_order = np.argsort(path_lengths)
|
||||
valid_refl = valid_refl[sort_order]
|
||||
path_lengths = path_lengths[sort_order]
|
||||
|
||||
# Check LOS only for top N shortest candidates
|
||||
check_count = min(len(valid_refl), max_los_checks)
|
||||
best_idx = -1
|
||||
best_length = np.inf
|
||||
|
||||
for i in range(check_count):
|
||||
length = path_lengths[i]
|
||||
if length >= best_length:
|
||||
continue
|
||||
|
||||
refl_pt = valid_refl[i]
|
||||
|
||||
intersects1, _ = line_intersects_polygons_batch(
|
||||
tx, refl_pt, obstacle_polygons_x, obstacle_polygons_y, obstacle_lengths,
|
||||
)
|
||||
if np.any(intersects1):
|
||||
continue
|
||||
|
||||
intersects2, _ = line_intersects_polygons_batch(
|
||||
refl_pt, rx, obstacle_polygons_x, obstacle_polygons_y, obstacle_lengths,
|
||||
)
|
||||
if np.any(intersects2):
|
||||
continue
|
||||
|
||||
best_idx = i
|
||||
best_length = length
|
||||
break # sorted by length, first valid is best
|
||||
|
||||
if best_idx < 0:
|
||||
return None, np.inf, 0.0
|
||||
|
||||
best_point = valid_refl[best_idx]
|
||||
|
||||
# Reflection loss: 3-10 dB depending on path ratio
|
||||
path_ratio = best_length / max(direct_dist, 1.0)
|
||||
reflection_loss = 3.0 + 7.0 * min(1.0, (path_ratio - 1.0) * 2)
|
||||
|
||||
return best_point, best_length, reflection_loss
|
||||
@@ -1,14 +1,62 @@
|
||||
from contextlib import asynccontextmanager
|
||||
from contextlib import asynccontextmanager
|
||||
import logging
|
||||
import platform
|
||||
|
||||
from fastapi import FastAPI
|
||||
from fastapi import FastAPI, WebSocket
|
||||
from fastapi.middleware.cors import CORSMiddleware
|
||||
|
||||
from app.core.database import connect_to_mongo, close_mongo_connection
|
||||
from app.api.routes import health, projects, terrain, coverage, regions, system
|
||||
from app.api.routes import health, projects, terrain, coverage, regions, system, gpu
|
||||
from app.api.websocket import websocket_endpoint
|
||||
|
||||
logger = logging.getLogger("rfcp.startup")
|
||||
|
||||
|
||||
def check_gpu_availability():
|
||||
"""Log GPU status on startup for debugging."""
|
||||
is_wsl = "microsoft" in platform.release().lower()
|
||||
env_note = " (WSL2)" if is_wsl else ""
|
||||
|
||||
# Check CuPy / CUDA
|
||||
try:
|
||||
import cupy as cp
|
||||
device_count = cp.cuda.runtime.getDeviceCount()
|
||||
if device_count > 0:
|
||||
props = cp.cuda.runtime.getDeviceProperties(0)
|
||||
name = props["name"]
|
||||
if isinstance(name, bytes):
|
||||
name = name.decode()
|
||||
mem_mb = props["totalGlobalMem"] // (1024 * 1024)
|
||||
logger.info(f"GPU detected{env_note}: {name} ({mem_mb} MB VRAM)")
|
||||
logger.info(f"CuPy {cp.__version__}, CUDA devices: {device_count}")
|
||||
else:
|
||||
logger.warning(f"CuPy installed but no CUDA devices found{env_note}")
|
||||
except Exception as e:
|
||||
logger.warning(f"CuPy FAILED {env_note}: {e}")
|
||||
if is_wsl:
|
||||
logger.warning("Install: pip3 install cupy-cuda12x --break-system-packages")
|
||||
else:
|
||||
logger.warning("Install: pip install cupy-cuda12x")
|
||||
except Exception as e:
|
||||
logger.warning(f"CuPy error{env_note}: {e}")
|
||||
|
||||
# Check PyOpenCL
|
||||
try:
|
||||
import pyopencl as cl
|
||||
platforms = cl.get_platforms()
|
||||
for p in platforms:
|
||||
for d in p.get_devices():
|
||||
logger.info(f"OpenCL device: {d.name.strip()}")
|
||||
except Exception as e:
|
||||
logger.debug("PyOpenCL not installed (optional)")
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
|
||||
@asynccontextmanager
|
||||
async def lifespan(app: FastAPI):
|
||||
# Log GPU status on startup
|
||||
check_gpu_availability()
|
||||
await connect_to_mongo()
|
||||
yield
|
||||
await close_mongo_connection()
|
||||
@@ -17,7 +65,7 @@ async def lifespan(app: FastAPI):
|
||||
app = FastAPI(
|
||||
title="RFCP Backend API",
|
||||
description="RF Coverage Planning Backend",
|
||||
version="1.6.0",
|
||||
version="3.0.0",
|
||||
lifespan=lifespan,
|
||||
)
|
||||
|
||||
@@ -30,18 +78,22 @@ app.add_middleware(
|
||||
allow_headers=["*"],
|
||||
)
|
||||
|
||||
# Routes
|
||||
# REST routes
|
||||
app.include_router(health.router, prefix="/api/health", tags=["health"])
|
||||
app.include_router(projects.router, prefix="/api/projects", tags=["projects"])
|
||||
app.include_router(terrain.router, prefix="/api/terrain", tags=["terrain"])
|
||||
app.include_router(coverage.router, prefix="/api/coverage", tags=["coverage"])
|
||||
app.include_router(regions.router, prefix="/api/regions", tags=["regions"])
|
||||
app.include_router(system.router, prefix="/api/system", tags=["system"])
|
||||
app.include_router(gpu.router, prefix="/api/gpu", tags=["gpu"])
|
||||
|
||||
# WebSocket endpoint for real-time coverage with progress
|
||||
app.websocket("/ws")(websocket_endpoint)
|
||||
|
||||
|
||||
@app.get("/")
|
||||
async def root():
|
||||
return {"message": "RFCP Backend API", "version": "1.5.1"}
|
||||
return {"message": "RFCP Backend API", "version": "3.0.0"}
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
|
||||
11
backend/app/parallel/__init__.py
Normal file
11
backend/app/parallel/__init__.py
Normal file
@@ -0,0 +1,11 @@
|
||||
"""
|
||||
Parallel processing infrastructure for coverage calculations.
|
||||
"""
|
||||
|
||||
from app.parallel.manager import SharedMemoryManager, SharedTerrainData, SharedBuildingData
|
||||
from app.parallel.pool import ManagedProcessPool
|
||||
|
||||
__all__ = [
|
||||
"SharedMemoryManager", "SharedTerrainData", "SharedBuildingData",
|
||||
"ManagedProcessPool",
|
||||
]
|
||||
174
backend/app/parallel/manager.py
Normal file
174
backend/app/parallel/manager.py
Normal file
@@ -0,0 +1,174 @@
|
||||
"""
|
||||
Shared Memory Manager for parallel processing.
|
||||
|
||||
Instead of copying building/terrain data to each worker,
|
||||
store data in shared memory that all workers can read.
|
||||
"""
|
||||
|
||||
import multiprocessing.shared_memory as shm
|
||||
import numpy as np
|
||||
from dataclasses import dataclass
|
||||
from typing import List, Optional
|
||||
|
||||
|
||||
@dataclass
|
||||
class SharedTerrainData:
|
||||
"""Reference to terrain data in shared memory."""
|
||||
shm_name: str
|
||||
shape: tuple
|
||||
bounds: tuple # (min_lat, min_lon, max_lat, max_lon)
|
||||
resolution: float
|
||||
|
||||
def get_array(self) -> np.ndarray:
|
||||
existing_shm = shm.SharedMemory(name=self.shm_name)
|
||||
return np.ndarray(self.shape, dtype=np.int16, buffer=existing_shm.buf)
|
||||
|
||||
|
||||
@dataclass
|
||||
class SharedBuildingData:
|
||||
"""Reference to building data in shared memory."""
|
||||
shm_centroids_name: str # (N, 2) float64
|
||||
shm_heights_name: str # (N,) float32
|
||||
shm_vertices_name: str # (total_verts, 2) float64
|
||||
shm_offsets_name: str # (N+1,) int32
|
||||
count: int
|
||||
total_vertices: int
|
||||
|
||||
def get_centroids(self) -> np.ndarray:
|
||||
existing = shm.SharedMemory(name=self.shm_centroids_name)
|
||||
return np.ndarray((self.count, 2), dtype=np.float64, buffer=existing.buf)
|
||||
|
||||
def get_heights(self) -> np.ndarray:
|
||||
existing = shm.SharedMemory(name=self.shm_heights_name)
|
||||
return np.ndarray((self.count,), dtype=np.float32, buffer=existing.buf)
|
||||
|
||||
def get_offsets(self) -> np.ndarray:
|
||||
existing = shm.SharedMemory(name=self.shm_offsets_name)
|
||||
return np.ndarray((self.count + 1,), dtype=np.int32, buffer=existing.buf)
|
||||
|
||||
def get_vertices(self) -> np.ndarray:
|
||||
existing = shm.SharedMemory(name=self.shm_vertices_name)
|
||||
return np.ndarray((self.total_vertices, 2), dtype=np.float64, buffer=existing.buf)
|
||||
|
||||
def get_polygon(self, idx: int) -> np.ndarray:
|
||||
offsets = self.get_offsets()
|
||||
vertices = self.get_vertices()
|
||||
start, end = offsets[idx], offsets[idx + 1]
|
||||
return vertices[start:end]
|
||||
|
||||
|
||||
class SharedMemoryManager:
|
||||
"""
|
||||
Manages shared memory blocks for parallel processing.
|
||||
|
||||
Usage:
|
||||
manager = SharedMemoryManager()
|
||||
terrain_ref = manager.store_terrain(heights, bounds, resolution)
|
||||
buildings_ref = manager.store_buildings(buildings)
|
||||
|
||||
# Pass references (small dataclasses) to workers
|
||||
pool.map(worker_func, points, terrain_ref, buildings_ref)
|
||||
|
||||
# Workers attach to shared memory — no copy!
|
||||
terrain = terrain_ref.get_array()
|
||||
|
||||
# Cleanup when done
|
||||
manager.cleanup()
|
||||
"""
|
||||
|
||||
def __init__(self):
|
||||
self._shm_blocks: list = []
|
||||
|
||||
def store_terrain(
|
||||
self, heights: np.ndarray, bounds: tuple, resolution: float,
|
||||
) -> SharedTerrainData:
|
||||
"""Store terrain heights in shared memory."""
|
||||
shm_block = shm.SharedMemory(create=True, size=heights.nbytes)
|
||||
self._shm_blocks.append(shm_block)
|
||||
|
||||
shm_array = np.ndarray(heights.shape, dtype=heights.dtype, buffer=shm_block.buf)
|
||||
shm_array[:] = heights[:]
|
||||
|
||||
return SharedTerrainData(
|
||||
shm_name=shm_block.name,
|
||||
shape=heights.shape,
|
||||
bounds=bounds,
|
||||
resolution=resolution,
|
||||
)
|
||||
|
||||
def store_buildings(self, buildings: list) -> Optional[SharedBuildingData]:
|
||||
"""Store building data in shared memory.
|
||||
|
||||
Args:
|
||||
buildings: List of Building objects or dicts with geometry.
|
||||
|
||||
Returns:
|
||||
SharedBuildingData reference, or None if no buildings.
|
||||
"""
|
||||
n = len(buildings)
|
||||
if n == 0:
|
||||
return None
|
||||
|
||||
# Extract centroids
|
||||
centroids = np.zeros((n, 2), dtype=np.float64)
|
||||
heights = np.zeros(n, dtype=np.float32)
|
||||
all_vertices = []
|
||||
offsets = [0]
|
||||
|
||||
for i, b in enumerate(buildings):
|
||||
# Support both dict and object forms
|
||||
if hasattr(b, 'geometry'):
|
||||
geom = b.geometry
|
||||
h = getattr(b, 'height', 10.0)
|
||||
else:
|
||||
geom = b.get('geometry', [])
|
||||
h = b.get('height', 10.0)
|
||||
|
||||
if geom:
|
||||
lats = [p[1] for p in geom]
|
||||
lons = [p[0] for p in geom]
|
||||
centroids[i] = [sum(lats) / len(lats), sum(lons) / len(lons)]
|
||||
for lon, lat in geom:
|
||||
all_vertices.append([lat, lon])
|
||||
heights[i] = h or 10.0
|
||||
offsets.append(len(all_vertices))
|
||||
|
||||
vertices = np.array(all_vertices, dtype=np.float64) if all_vertices else np.zeros((0, 2), dtype=np.float64)
|
||||
offsets = np.array(offsets, dtype=np.int32)
|
||||
|
||||
# Create shared memory
|
||||
shm_centroids = shm.SharedMemory(create=True, size=max(centroids.nbytes, 1))
|
||||
shm_heights = shm.SharedMemory(create=True, size=max(heights.nbytes, 1))
|
||||
shm_vertices = shm.SharedMemory(create=True, size=max(vertices.nbytes, 1))
|
||||
shm_offsets = shm.SharedMemory(create=True, size=max(offsets.nbytes, 1))
|
||||
|
||||
self._shm_blocks.extend([shm_centroids, shm_heights, shm_vertices, shm_offsets])
|
||||
|
||||
# Copy data
|
||||
if centroids.nbytes > 0:
|
||||
np.ndarray(centroids.shape, dtype=centroids.dtype, buffer=shm_centroids.buf)[:] = centroids
|
||||
if heights.nbytes > 0:
|
||||
np.ndarray(heights.shape, dtype=heights.dtype, buffer=shm_heights.buf)[:] = heights
|
||||
if vertices.nbytes > 0:
|
||||
np.ndarray(vertices.shape, dtype=vertices.dtype, buffer=shm_vertices.buf)[:] = vertices
|
||||
if offsets.nbytes > 0:
|
||||
np.ndarray(offsets.shape, dtype=offsets.dtype, buffer=shm_offsets.buf)[:] = offsets
|
||||
|
||||
return SharedBuildingData(
|
||||
shm_centroids_name=shm_centroids.name,
|
||||
shm_heights_name=shm_heights.name,
|
||||
shm_vertices_name=shm_vertices.name,
|
||||
shm_offsets_name=shm_offsets.name,
|
||||
count=n,
|
||||
total_vertices=len(all_vertices),
|
||||
)
|
||||
|
||||
def cleanup(self):
|
||||
"""Release all shared memory blocks."""
|
||||
for block in self._shm_blocks:
|
||||
try:
|
||||
block.close()
|
||||
block.unlink()
|
||||
except Exception:
|
||||
pass
|
||||
self._shm_blocks.clear()
|
||||
136
backend/app/parallel/pool.py
Normal file
136
backend/app/parallel/pool.py
Normal file
@@ -0,0 +1,136 @@
|
||||
"""
|
||||
Managed process pool with automatic cleanup.
|
||||
"""
|
||||
|
||||
import os
|
||||
import sys
|
||||
import subprocess
|
||||
import time
|
||||
import multiprocessing as mp
|
||||
from concurrent.futures import ProcessPoolExecutor, as_completed
|
||||
from typing import List, Dict, Tuple, Optional, Callable
|
||||
|
||||
|
||||
class ManagedProcessPool:
|
||||
"""
|
||||
Process pool wrapper with:
|
||||
- Automatic cleanup on exit
|
||||
- Worker process kill on failure
|
||||
- Progress reporting
|
||||
"""
|
||||
|
||||
def __init__(self, max_workers: int = 6):
|
||||
self.max_workers = min(max_workers, 6)
|
||||
self._pool: Optional[ProcessPoolExecutor] = None
|
||||
|
||||
def map_chunks(
|
||||
self,
|
||||
worker_fn: Callable,
|
||||
chunks: List[tuple],
|
||||
log_fn: Optional[Callable] = None,
|
||||
) -> List[Dict]:
|
||||
"""
|
||||
Submit chunks to the pool and collect results.
|
||||
|
||||
Args:
|
||||
worker_fn: Function to call for each chunk
|
||||
chunks: List of (chunk_data, *args) tuples
|
||||
log_fn: Progress logging function
|
||||
|
||||
Returns:
|
||||
Flattened list of result dicts
|
||||
"""
|
||||
if log_fn is None:
|
||||
log_fn = lambda msg: print(f"[POOL] {msg}", flush=True)
|
||||
|
||||
all_results: List[Dict] = []
|
||||
|
||||
try:
|
||||
ctx = mp.get_context('spawn')
|
||||
self._pool = ProcessPoolExecutor(
|
||||
max_workers=self.max_workers, mp_context=ctx,
|
||||
)
|
||||
|
||||
futures = {
|
||||
self._pool.submit(worker_fn, chunk): i
|
||||
for i, chunk in enumerate(chunks)
|
||||
}
|
||||
|
||||
completed = 0
|
||||
t0 = time.time()
|
||||
|
||||
for future in as_completed(futures):
|
||||
try:
|
||||
chunk_results = future.result()
|
||||
all_results.extend(chunk_results)
|
||||
except Exception as e:
|
||||
log_fn(f"Chunk error: {e}")
|
||||
|
||||
completed += 1
|
||||
elapsed = time.time() - t0
|
||||
pct = completed * 100 // len(chunks)
|
||||
log_fn(f"Progress: {completed}/{len(chunks)} ({pct}%)")
|
||||
|
||||
except Exception as e:
|
||||
log_fn(f"Pool error: {e}")
|
||||
|
||||
finally:
|
||||
if self._pool:
|
||||
self._pool.shutdown(wait=False, cancel_futures=True)
|
||||
time.sleep(0.5)
|
||||
killed = self._kill_orphans()
|
||||
if killed > 0:
|
||||
log_fn(f"Cleaned up {killed} orphaned workers")
|
||||
|
||||
return all_results
|
||||
|
||||
@staticmethod
|
||||
def _kill_orphans() -> int:
|
||||
"""Kill orphaned rfcp-server worker processes."""
|
||||
my_pid = os.getpid()
|
||||
killed = 0
|
||||
|
||||
if sys.platform == 'win32':
|
||||
try:
|
||||
result = subprocess.run(
|
||||
['tasklist', '/FI', 'IMAGENAME eq rfcp-server.exe', '/FO', 'CSV', '/NH'],
|
||||
capture_output=True, text=True, timeout=5,
|
||||
)
|
||||
for line in result.stdout.strip().split('\n'):
|
||||
if 'rfcp-server.exe' not in line:
|
||||
continue
|
||||
parts = line.split(',')
|
||||
if len(parts) >= 2:
|
||||
pid_str = parts[1].strip().strip('"')
|
||||
try:
|
||||
pid = int(pid_str)
|
||||
if pid != my_pid:
|
||||
subprocess.run(
|
||||
['taskkill', '/F', '/PID', str(pid)],
|
||||
capture_output=True, timeout=5,
|
||||
)
|
||||
killed += 1
|
||||
except (ValueError, subprocess.TimeoutExpired):
|
||||
pass
|
||||
except Exception:
|
||||
pass
|
||||
else:
|
||||
try:
|
||||
result = subprocess.run(
|
||||
['pgrep', '-f', 'rfcp-server'],
|
||||
capture_output=True, text=True, timeout=5,
|
||||
)
|
||||
for pid_str in result.stdout.strip().split('\n'):
|
||||
if not pid_str:
|
||||
continue
|
||||
try:
|
||||
pid = int(pid_str)
|
||||
if pid != my_pid:
|
||||
os.kill(pid, 9)
|
||||
killed += 1
|
||||
except (ValueError, ProcessLookupError, PermissionError):
|
||||
pass
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
return killed
|
||||
64
backend/app/parallel/worker.py
Normal file
64
backend/app/parallel/worker.py
Normal file
@@ -0,0 +1,64 @@
|
||||
"""
|
||||
Worker functions for parallel coverage calculation.
|
||||
|
||||
These run in separate processes and access shared memory data.
|
||||
"""
|
||||
|
||||
from typing import List, Dict, Optional
|
||||
from app.parallel.manager import SharedTerrainData, SharedBuildingData
|
||||
|
||||
|
||||
def process_chunk(
|
||||
chunk: List[tuple],
|
||||
terrain_cache: dict,
|
||||
buildings: list,
|
||||
osm_data: dict,
|
||||
config: dict,
|
||||
) -> List[dict]:
|
||||
"""
|
||||
Process a chunk of grid points.
|
||||
|
||||
This is the standard worker function used by both Ray and ProcessPoolExecutor.
|
||||
It re-uses the existing coverage calculation logic.
|
||||
"""
|
||||
# Inject terrain cache into the module-level singleton
|
||||
from app.services.terrain_service import terrain_service
|
||||
terrain_service._tile_cache = terrain_cache
|
||||
|
||||
# Build spatial index
|
||||
from app.services.spatial_index import SpatialIndex
|
||||
spatial_idx = SpatialIndex()
|
||||
if buildings:
|
||||
spatial_idx.build(buildings)
|
||||
|
||||
# Process points using existing calculator
|
||||
from app.services.coverage_service import CoverageService, SiteParams, CoverageSettings
|
||||
|
||||
site = SiteParams(**config['site_dict'])
|
||||
settings = CoverageSettings(**config['settings_dict'])
|
||||
svc = CoverageService()
|
||||
|
||||
timing = {
|
||||
"los": 0.0, "buildings": 0.0, "antenna": 0.0,
|
||||
"dominant_path": 0.0, "street_canyon": 0.0,
|
||||
"reflection": 0.0, "vegetation": 0.0,
|
||||
}
|
||||
|
||||
precomputed = config.get('precomputed')
|
||||
|
||||
results = []
|
||||
for lat, lon, point_elev in chunk:
|
||||
pre = precomputed.get((lat, lon)) if precomputed else None
|
||||
point = svc._calculate_point_sync(
|
||||
site, lat, lon, settings,
|
||||
buildings, osm_data.get('streets', []),
|
||||
spatial_idx, osm_data.get('water_bodies', []),
|
||||
osm_data.get('vegetation_areas', []),
|
||||
config['site_elevation'], point_elev, timing,
|
||||
precomputed_distance=pre.get('distance') if pre else None,
|
||||
precomputed_path_loss=pre.get('path_loss') if pre else None,
|
||||
)
|
||||
if point.rsrp >= settings.min_signal:
|
||||
results.append(point.model_dump())
|
||||
|
||||
return results
|
||||
21
backend/app/propagation/__init__.py
Normal file
21
backend/app/propagation/__init__.py
Normal file
@@ -0,0 +1,21 @@
|
||||
"""
|
||||
Propagation models for RF coverage calculation.
|
||||
|
||||
Each model implements the PropagationModel interface and is stateless/thread-safe.
|
||||
"""
|
||||
|
||||
from app.propagation.base import PropagationModel, PropagationInput, PropagationOutput
|
||||
from app.propagation.free_space import FreeSpaceModel
|
||||
from app.propagation.okumura_hata import OkumuraHataModel
|
||||
from app.propagation.cost231_hata import Cost231HataModel
|
||||
from app.propagation.cost231_wi import Cost231WIModel
|
||||
from app.propagation.itu_r_p1546 import ITUR_P1546Model
|
||||
from app.propagation.itu_r_p526 import KnifeEdgeDiffractionModel
|
||||
from app.propagation.longley_rice import LongleyRiceModel
|
||||
|
||||
__all__ = [
|
||||
"PropagationModel", "PropagationInput", "PropagationOutput",
|
||||
"FreeSpaceModel", "OkumuraHataModel", "Cost231HataModel",
|
||||
"Cost231WIModel", "ITUR_P1546Model", "KnifeEdgeDiffractionModel",
|
||||
"LongleyRiceModel",
|
||||
]
|
||||
87
backend/app/propagation/base.py
Normal file
87
backend/app/propagation/base.py
Normal file
@@ -0,0 +1,87 @@
|
||||
"""
|
||||
Abstract base class for all propagation models.
|
||||
|
||||
Each model implements a single, well-defined propagation algorithm.
|
||||
Models are stateless and can be called concurrently.
|
||||
"""
|
||||
|
||||
from abc import ABC, abstractmethod
|
||||
from dataclasses import dataclass, field
|
||||
from typing import Optional
|
||||
|
||||
|
||||
@dataclass
|
||||
class PropagationInput:
|
||||
"""Input for propagation calculation."""
|
||||
frequency_mhz: float
|
||||
distance_m: float
|
||||
tx_height_m: float
|
||||
rx_height_m: float
|
||||
environment: str = "urban" # urban, suburban, rural, open
|
||||
|
||||
# Optional terrain info
|
||||
terrain_clearance_m: Optional[float] = None
|
||||
terrain_roughness_m: Optional[float] = None
|
||||
|
||||
# Optional building info
|
||||
building_height_m: Optional[float] = None
|
||||
street_width_m: Optional[float] = None
|
||||
building_separation_m: Optional[float] = None
|
||||
|
||||
|
||||
@dataclass
|
||||
class PropagationOutput:
|
||||
"""Output from propagation calculation."""
|
||||
path_loss_db: float
|
||||
model_name: str
|
||||
is_los: bool
|
||||
breakdown: dict = field(default_factory=dict)
|
||||
|
||||
|
||||
class PropagationModel(ABC):
|
||||
"""
|
||||
Abstract base class for all propagation models.
|
||||
|
||||
Each model implements a single, well-defined propagation algorithm.
|
||||
Models are stateless and can be called concurrently.
|
||||
"""
|
||||
|
||||
@property
|
||||
@abstractmethod
|
||||
def name(self) -> str:
|
||||
"""Model name for logging/display."""
|
||||
pass
|
||||
|
||||
@property
|
||||
@abstractmethod
|
||||
def frequency_range(self) -> tuple:
|
||||
"""Valid frequency range (min_mhz, max_mhz)."""
|
||||
pass
|
||||
|
||||
@property
|
||||
@abstractmethod
|
||||
def distance_range(self) -> tuple:
|
||||
"""Valid distance range (min_m, max_m)."""
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def calculate(self, input: PropagationInput) -> PropagationOutput:
|
||||
"""
|
||||
Calculate path loss for given input.
|
||||
|
||||
This method MUST be:
|
||||
- Stateless (no side effects)
|
||||
- Thread-safe (can be called concurrently)
|
||||
- Fast (no I/O, no heavy computation)
|
||||
"""
|
||||
pass
|
||||
|
||||
def is_valid_for(self, input: PropagationInput) -> bool:
|
||||
"""Check if this model is valid for given input."""
|
||||
freq_min, freq_max = self.frequency_range
|
||||
dist_min, dist_max = self.distance_range
|
||||
|
||||
return (
|
||||
freq_min <= input.frequency_mhz <= freq_max and
|
||||
dist_min <= input.distance_m <= dist_max
|
||||
)
|
||||
62
backend/app/propagation/cost231_hata.py
Normal file
62
backend/app/propagation/cost231_hata.py
Normal file
@@ -0,0 +1,62 @@
|
||||
"""
|
||||
COST-231 Hata model (extension of Okumura-Hata).
|
||||
|
||||
Valid for:
|
||||
- Frequency: 1500-2000 MHz
|
||||
- Distance: 1-20 km
|
||||
|
||||
Better for LTE bands than original Okumura-Hata.
|
||||
"""
|
||||
|
||||
import math
|
||||
from app.propagation.base import PropagationModel, PropagationInput, PropagationOutput
|
||||
|
||||
|
||||
class Cost231HataModel(PropagationModel):
|
||||
|
||||
@property
|
||||
def name(self) -> str:
|
||||
return "COST-231-Hata"
|
||||
|
||||
@property
|
||||
def frequency_range(self) -> tuple:
|
||||
return (1500, 2000)
|
||||
|
||||
@property
|
||||
def distance_range(self) -> tuple:
|
||||
return (100, 20000)
|
||||
|
||||
def calculate(self, input: PropagationInput) -> PropagationOutput:
|
||||
f = input.frequency_mhz
|
||||
d = max(input.distance_m / 1000, 0.1)
|
||||
hb = max(input.tx_height_m, 1.0)
|
||||
hm = max(input.rx_height_m, 1.0)
|
||||
|
||||
# Mobile antenna correction (medium city)
|
||||
a_hm = (1.1 * math.log10(f) - 0.7) * hm - (1.56 * math.log10(f) - 0.8)
|
||||
|
||||
# Metropolitan center correction
|
||||
C_m = 3 if input.environment == "urban" else 0
|
||||
|
||||
L = (
|
||||
46.3
|
||||
+ 33.9 * math.log10(f)
|
||||
- 13.82 * math.log10(hb)
|
||||
- a_hm
|
||||
+ (44.9 - 6.55 * math.log10(hb)) * math.log10(d)
|
||||
+ C_m
|
||||
)
|
||||
|
||||
return PropagationOutput(
|
||||
path_loss_db=L,
|
||||
model_name=self.name,
|
||||
is_los=False,
|
||||
breakdown={
|
||||
"base_loss": 46.3,
|
||||
"frequency_term": 33.9 * math.log10(f),
|
||||
"height_gain": -13.82 * math.log10(hb),
|
||||
"mobile_correction": -a_hm,
|
||||
"distance_term": (44.9 - 6.55 * math.log10(hb)) * math.log10(d),
|
||||
"metro_correction": C_m,
|
||||
},
|
||||
)
|
||||
114
backend/app/propagation/cost231_wi.py
Normal file
114
backend/app/propagation/cost231_wi.py
Normal file
@@ -0,0 +1,114 @@
|
||||
"""
|
||||
COST-231 Walfisch-Ikegami model.
|
||||
|
||||
Valid for:
|
||||
- Frequency: 800-2000 MHz
|
||||
- Distance: 20m-5km
|
||||
- Urban microcell environments
|
||||
|
||||
Accounts for building heights, street widths, and building separation.
|
||||
Reference: COST 231 Final Report, Chapter 4.
|
||||
"""
|
||||
|
||||
import math
|
||||
from app.propagation.base import PropagationModel, PropagationInput, PropagationOutput
|
||||
|
||||
|
||||
class Cost231WIModel(PropagationModel):
|
||||
|
||||
@property
|
||||
def name(self) -> str:
|
||||
return "COST-231-WI"
|
||||
|
||||
@property
|
||||
def frequency_range(self) -> tuple:
|
||||
return (800, 2000)
|
||||
|
||||
@property
|
||||
def distance_range(self) -> tuple:
|
||||
return (20, 5000)
|
||||
|
||||
def calculate(self, input: PropagationInput) -> PropagationOutput:
|
||||
f = input.frequency_mhz
|
||||
d = max(input.distance_m / 1000, 0.02) # km
|
||||
hb = max(input.tx_height_m, 4.0)
|
||||
hm = max(input.rx_height_m, 1.0)
|
||||
|
||||
# Building parameters (defaults for typical urban)
|
||||
h_roof = input.building_height_m or 15.0 # avg building height
|
||||
w = input.street_width_m or 20.0 # street width
|
||||
b = input.building_separation_m or 30.0 # building separation
|
||||
|
||||
delta_hb = hb - h_roof # TX above rooftop
|
||||
delta_hm = h_roof - hm # rooftop above RX
|
||||
|
||||
# Free space loss
|
||||
L_fs = 32.45 + 20 * math.log10(d) + 20 * math.log10(f)
|
||||
|
||||
# LOS case
|
||||
if delta_hb > 0 and d < 0.5:
|
||||
L = L_fs
|
||||
return PropagationOutput(
|
||||
path_loss_db=L,
|
||||
model_name=self.name,
|
||||
is_los=True,
|
||||
breakdown={"free_space": L_fs, "rooftop_diffraction": 0, "multiscreen": 0},
|
||||
)
|
||||
|
||||
# Rooftop-to-street diffraction (L_rts)
|
||||
phi = 90.0 # street orientation angle (worst case)
|
||||
if phi < 35:
|
||||
L_ori = -10 + 0.354 * phi
|
||||
elif phi < 55:
|
||||
L_ori = 2.5 + 0.075 * (phi - 35)
|
||||
else:
|
||||
L_ori = 4.0 - 0.114 * (phi - 55)
|
||||
|
||||
L_rts = (
|
||||
-16.9
|
||||
- 10 * math.log10(w)
|
||||
+ 10 * math.log10(f)
|
||||
+ 20 * math.log10(delta_hm)
|
||||
+ L_ori
|
||||
)
|
||||
|
||||
# Multi-screen diffraction (L_msd)
|
||||
if delta_hb > 0:
|
||||
L_bsh = -18 * math.log10(1 + delta_hb)
|
||||
k_a = 54
|
||||
k_d = 18
|
||||
else:
|
||||
L_bsh = 0
|
||||
k_a = 54 - 0.8 * abs(delta_hb)
|
||||
if d >= 0.5:
|
||||
k_a = max(k_a, 54 - 0.8 * abs(delta_hb) * (d / 0.5))
|
||||
k_d = 18 - 15 * abs(delta_hb) / h_roof
|
||||
|
||||
k_f = -4 + 0.7 * (f / 925 - 1) # medium city
|
||||
if input.environment == "urban":
|
||||
k_f = -4 + 1.5 * (f / 925 - 1)
|
||||
|
||||
L_msd = (
|
||||
L_bsh
|
||||
+ k_a
|
||||
+ k_d * math.log10(d)
|
||||
+ k_f * math.log10(f)
|
||||
- 9 * math.log10(b)
|
||||
)
|
||||
|
||||
# Total NLOS loss
|
||||
if L_rts + L_msd > 0:
|
||||
L = L_fs + L_rts + L_msd
|
||||
else:
|
||||
L = L_fs
|
||||
|
||||
return PropagationOutput(
|
||||
path_loss_db=L,
|
||||
model_name=self.name,
|
||||
is_los=False,
|
||||
breakdown={
|
||||
"free_space": L_fs,
|
||||
"rooftop_diffraction": max(L_rts, 0),
|
||||
"multiscreen": max(L_msd, 0),
|
||||
},
|
||||
)
|
||||
43
backend/app/propagation/free_space.py
Normal file
43
backend/app/propagation/free_space.py
Normal file
@@ -0,0 +1,43 @@
|
||||
"""
|
||||
Free Space Path Loss (FSPL) model.
|
||||
|
||||
Used as baseline and for LOS conditions.
|
||||
FSPL = 20*log10(d) + 20*log10(f) + 32.45
|
||||
where d in km, f in MHz
|
||||
"""
|
||||
|
||||
import math
|
||||
from app.propagation.base import PropagationModel, PropagationInput, PropagationOutput
|
||||
|
||||
|
||||
class FreeSpaceModel(PropagationModel):
|
||||
"""Free Space Path Loss — theoretical minimum propagation loss."""
|
||||
|
||||
@property
|
||||
def name(self) -> str:
|
||||
return "Free-Space"
|
||||
|
||||
@property
|
||||
def frequency_range(self) -> tuple:
|
||||
return (1, 100000)
|
||||
|
||||
@property
|
||||
def distance_range(self) -> tuple:
|
||||
return (1, 1000000) # 1m to 1000km
|
||||
|
||||
def calculate(self, input: PropagationInput) -> PropagationOutput:
|
||||
d_km = max(input.distance_m / 1000, 0.001)
|
||||
f = input.frequency_mhz
|
||||
|
||||
L = 20 * math.log10(d_km) + 20 * math.log10(f) + 32.45
|
||||
|
||||
return PropagationOutput(
|
||||
path_loss_db=L,
|
||||
model_name=self.name,
|
||||
is_los=True,
|
||||
breakdown={
|
||||
"distance_loss": 20 * math.log10(d_km),
|
||||
"frequency_loss": 20 * math.log10(f),
|
||||
"constant": 32.45,
|
||||
},
|
||||
)
|
||||
74
backend/app/propagation/itu_r_p1546.py
Normal file
74
backend/app/propagation/itu_r_p1546.py
Normal file
@@ -0,0 +1,74 @@
|
||||
"""
|
||||
ITU-R P.1546 model for point-to-area predictions.
|
||||
|
||||
Valid for:
|
||||
- Frequency: 30-3000 MHz
|
||||
- Distance: 1-1000 km
|
||||
- Time percentages: 1%, 10%, 50%
|
||||
|
||||
Best for: VHF/UHF broadcasting and land mobile services.
|
||||
Reference: ITU-R P.1546-6 (2019)
|
||||
"""
|
||||
|
||||
import math
|
||||
from app.propagation.base import PropagationModel, PropagationInput, PropagationOutput
|
||||
|
||||
|
||||
class ITUR_P1546Model(PropagationModel):
|
||||
"""
|
||||
Simplified P.1546 implementation.
|
||||
|
||||
Full implementation would include terrain clearance angle,
|
||||
mixed path (land/sea), and time variability.
|
||||
"""
|
||||
|
||||
@property
|
||||
def name(self) -> str:
|
||||
return "ITU-R-P.1546"
|
||||
|
||||
@property
|
||||
def frequency_range(self) -> tuple:
|
||||
return (30, 3000)
|
||||
|
||||
@property
|
||||
def distance_range(self) -> tuple:
|
||||
return (1000, 1000000) # 1-1000 km
|
||||
|
||||
def calculate(self, input: PropagationInput) -> PropagationOutput:
|
||||
f = input.frequency_mhz
|
||||
d = max(input.distance_m / 1000, 1.0) # km
|
||||
h1 = max(input.tx_height_m, 1.0)
|
||||
|
||||
# Nominal frequency bands
|
||||
if f < 100:
|
||||
f_nom = 100
|
||||
elif f < 600:
|
||||
f_nom = 600
|
||||
else:
|
||||
f_nom = 2000
|
||||
|
||||
# Basic field strength at 1 kW ERP (from curves, simplified regression)
|
||||
E_ref = 106.9 - 20 * math.log10(d) # dBuV/m at 1kW
|
||||
|
||||
# Height gain for transmitter
|
||||
delta_h1 = 20 * math.log10(h1 / 10) if h1 > 10 else 0
|
||||
|
||||
# Frequency correction
|
||||
delta_f = 20 * math.log10(f / f_nom)
|
||||
|
||||
# Convert field strength to path loss
|
||||
# L = 139.3 - E + 20*log10(f) (for 50 Ohm)
|
||||
E = E_ref + delta_h1 - delta_f
|
||||
L = 139.3 - E + 20 * math.log10(f)
|
||||
|
||||
return PropagationOutput(
|
||||
path_loss_db=L,
|
||||
model_name=self.name,
|
||||
is_los=d < 5,
|
||||
breakdown={
|
||||
"reference_field": E_ref,
|
||||
"height_gain": delta_h1,
|
||||
"frequency_correction": delta_f,
|
||||
"path_loss": L,
|
||||
},
|
||||
)
|
||||
87
backend/app/propagation/itu_r_p526.py
Normal file
87
backend/app/propagation/itu_r_p526.py
Normal file
@@ -0,0 +1,87 @@
|
||||
"""
|
||||
Knife-edge diffraction model based on ITU-R P.526.
|
||||
|
||||
Used for calculating additional loss when terrain or obstacles
|
||||
block the line of sight between TX and RX.
|
||||
|
||||
Reference: ITU-R P.526-15
|
||||
"""
|
||||
|
||||
import math
|
||||
|
||||
|
||||
class KnifeEdgeDiffractionModel:
|
||||
"""
|
||||
Single knife-edge diffraction model.
|
||||
|
||||
Stateless utility — not a full PropagationModel since it calculates
|
||||
additional loss, not total path loss.
|
||||
"""
|
||||
|
||||
@staticmethod
|
||||
def calculate_loss(
|
||||
d1_m: float,
|
||||
d2_m: float,
|
||||
h_m: float,
|
||||
wavelength_m: float,
|
||||
) -> float:
|
||||
"""
|
||||
Calculate diffraction loss over single knife edge.
|
||||
|
||||
Args:
|
||||
d1_m: Distance from TX to obstacle
|
||||
d2_m: Distance from obstacle to RX
|
||||
h_m: Obstacle height above LOS line (positive = above)
|
||||
wavelength_m: Signal wavelength
|
||||
|
||||
Returns:
|
||||
Loss in dB (always >= 0)
|
||||
"""
|
||||
if d1_m <= 0 or d2_m <= 0 or wavelength_m <= 0:
|
||||
return 0.0
|
||||
|
||||
# Fresnel-Kirchhoff parameter
|
||||
v = h_m * math.sqrt(2 * (d1_m + d2_m) / (wavelength_m * d1_m * d2_m))
|
||||
|
||||
# Diffraction loss (Lee approximation)
|
||||
if v < -0.78:
|
||||
L = 0.0
|
||||
elif v < 0:
|
||||
L = 6.02 + 9.11 * v - 1.27 * v ** 2
|
||||
elif v < 2.4:
|
||||
L = 6.02 + 9.11 * v + 1.65 * v ** 2
|
||||
else:
|
||||
L = 12.95 + 20 * math.log10(v)
|
||||
|
||||
return max(0.0, L)
|
||||
|
||||
@staticmethod
|
||||
def calculate_clearance_loss(
|
||||
clearance_m: float,
|
||||
frequency_mhz: float,
|
||||
) -> float:
|
||||
"""
|
||||
Simplified diffraction loss from terrain clearance.
|
||||
|
||||
Matches the existing coverage_service._diffraction_loss logic.
|
||||
|
||||
Args:
|
||||
clearance_m: Minimum LOS clearance (negative = blocked)
|
||||
frequency_mhz: Signal frequency
|
||||
|
||||
Returns:
|
||||
Loss in dB (0 if positive clearance)
|
||||
"""
|
||||
if clearance_m >= 0:
|
||||
return 0.0
|
||||
|
||||
v = abs(clearance_m) / 10
|
||||
|
||||
if v <= 0:
|
||||
loss = 0.0
|
||||
elif v < 2.4:
|
||||
loss = 6.02 + 9.11 * v - 1.27 * v ** 2
|
||||
else:
|
||||
loss = 13.0 + 20 * math.log10(v)
|
||||
|
||||
return min(loss, 40.0)
|
||||
75
backend/app/propagation/longley_rice.py
Normal file
75
backend/app/propagation/longley_rice.py
Normal file
@@ -0,0 +1,75 @@
|
||||
"""
|
||||
Longley-Rice Irregular Terrain Model (ITM).
|
||||
|
||||
Best for:
|
||||
- VHF/UHF over irregular terrain
|
||||
- Point-to-point links
|
||||
- Distances 1-2000 km
|
||||
|
||||
Note: This is a simplified area-mode version.
|
||||
Full implementation requires terrain profile data.
|
||||
|
||||
Reference: NTIA Report 82-100
|
||||
"""
|
||||
|
||||
import math
|
||||
from app.propagation.base import PropagationModel, PropagationInput, PropagationOutput
|
||||
|
||||
|
||||
class LongleyRiceModel(PropagationModel):
|
||||
|
||||
@property
|
||||
def name(self) -> str:
|
||||
return "Longley-Rice"
|
||||
|
||||
@property
|
||||
def frequency_range(self) -> tuple:
|
||||
return (20, 20000) # 20 MHz to 20 GHz
|
||||
|
||||
@property
|
||||
def distance_range(self) -> tuple:
|
||||
return (1000, 2000000) # 1-2000 km
|
||||
|
||||
def calculate(self, input: PropagationInput) -> PropagationOutput:
|
||||
"""
|
||||
Simplified Longley-Rice (area mode).
|
||||
|
||||
For proper implementation, use splat! or NTIA ITM reference.
|
||||
"""
|
||||
f = input.frequency_mhz
|
||||
d = max(input.distance_m / 1000, 1.0)
|
||||
h1 = max(input.tx_height_m, 1.0)
|
||||
h2 = max(input.rx_height_m, 1.0)
|
||||
|
||||
# Terrain irregularity parameter (simplified)
|
||||
delta_h = input.terrain_roughness_m or 90 # Default: rolling hills
|
||||
|
||||
# Free space loss
|
||||
L_fs = 32.45 + 20 * math.log10(d) + 20 * math.log10(f)
|
||||
|
||||
# Terrain clutter loss (simplified)
|
||||
if delta_h < 10:
|
||||
L_terrain = 0 # Flat
|
||||
elif delta_h < 50:
|
||||
L_terrain = 5 # Gently rolling
|
||||
elif delta_h < 150:
|
||||
L_terrain = 10 # Rolling hills
|
||||
else:
|
||||
L_terrain = 15 # Mountains
|
||||
|
||||
# Height gain
|
||||
h_eff = h1 + h2
|
||||
height_gain = 10 * math.log10(h_eff / 20) if h_eff > 20 else 0
|
||||
|
||||
L = L_fs + L_terrain - height_gain
|
||||
|
||||
return PropagationOutput(
|
||||
path_loss_db=L,
|
||||
model_name=self.name,
|
||||
is_los=delta_h < 10 and d < 10,
|
||||
breakdown={
|
||||
"free_space_loss": L_fs,
|
||||
"terrain_loss": L_terrain,
|
||||
"height_gain": height_gain,
|
||||
},
|
||||
)
|
||||
74
backend/app/propagation/okumura_hata.py
Normal file
74
backend/app/propagation/okumura_hata.py
Normal file
@@ -0,0 +1,74 @@
|
||||
"""
|
||||
Okumura-Hata empirical propagation model.
|
||||
|
||||
Valid for:
|
||||
- Frequency: 150-1500 MHz
|
||||
- Distance: 1-20 km
|
||||
- TX height: 30-200 m
|
||||
- RX height: 1-10 m
|
||||
|
||||
Reference: Hata (1980), "Empirical Formula for Propagation Loss
|
||||
in Land Mobile Radio Services"
|
||||
"""
|
||||
|
||||
import math
|
||||
from app.propagation.base import PropagationModel, PropagationInput, PropagationOutput
|
||||
|
||||
|
||||
class OkumuraHataModel(PropagationModel):
|
||||
|
||||
@property
|
||||
def name(self) -> str:
|
||||
return "Okumura-Hata"
|
||||
|
||||
@property
|
||||
def frequency_range(self) -> tuple:
|
||||
return (150, 1500)
|
||||
|
||||
@property
|
||||
def distance_range(self) -> tuple:
|
||||
return (100, 20000) # Extended to 100m minimum for practical use
|
||||
|
||||
def calculate(self, input: PropagationInput) -> PropagationOutput:
|
||||
f = input.frequency_mhz
|
||||
d = max(input.distance_m / 1000, 0.1) # km, min 100m
|
||||
hb = max(input.tx_height_m, 1.0)
|
||||
hm = max(input.rx_height_m, 1.0)
|
||||
|
||||
# Mobile antenna height correction factor
|
||||
if input.environment == "urban" and f >= 400:
|
||||
# Large city
|
||||
a_hm = 3.2 * (math.log10(11.75 * hm) ** 2) - 4.97
|
||||
else:
|
||||
# Medium/small city
|
||||
a_hm = (1.1 * math.log10(f) - 0.7) * hm - (1.56 * math.log10(f) - 0.8)
|
||||
|
||||
# Basic path loss (urban)
|
||||
L_urban = (
|
||||
69.55
|
||||
+ 26.16 * math.log10(f)
|
||||
- 13.82 * math.log10(hb)
|
||||
- a_hm
|
||||
+ (44.9 - 6.55 * math.log10(hb)) * math.log10(d)
|
||||
)
|
||||
|
||||
# Environment correction
|
||||
if input.environment == "suburban":
|
||||
L = L_urban - 2 * (math.log10(f / 28) ** 2) - 5.4
|
||||
elif input.environment == "rural":
|
||||
L = L_urban - 4.78 * (math.log10(f) ** 2) + 18.33 * math.log10(f) - 35.94
|
||||
elif input.environment == "open":
|
||||
L = L_urban - 4.78 * (math.log10(f) ** 2) + 18.33 * math.log10(f) - 40.94
|
||||
else:
|
||||
L = L_urban
|
||||
|
||||
return PropagationOutput(
|
||||
path_loss_db=L,
|
||||
model_name=self.name,
|
||||
is_los=False,
|
||||
breakdown={
|
||||
"basic_loss": L_urban,
|
||||
"environment_correction": L - L_urban,
|
||||
"antenna_correction": a_hm,
|
||||
},
|
||||
)
|
||||
122
backend/app/services/boundary_service.py
Normal file
122
backend/app/services/boundary_service.py
Normal file
@@ -0,0 +1,122 @@
|
||||
"""
|
||||
Coverage boundary calculation service.
|
||||
|
||||
Computes concave hull (alpha shape) from coverage points to generate
|
||||
a realistic boundary that follows actual coverage contour.
|
||||
"""
|
||||
|
||||
import logging
|
||||
from typing import Optional
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def calculate_coverage_boundary(
|
||||
points: list[dict],
|
||||
threshold_dbm: float = -100,
|
||||
simplify_tolerance: float = 0.001,
|
||||
) -> list[dict]:
|
||||
"""
|
||||
Calculate coverage boundary as concave hull of points above threshold.
|
||||
|
||||
Args:
|
||||
points: List of coverage points with 'lat', 'lon', 'rsrp' keys
|
||||
threshold_dbm: RSRP threshold - points below this are excluded
|
||||
simplify_tolerance: Simplification tolerance in degrees (~100m per 0.001)
|
||||
|
||||
Returns:
|
||||
List of {'lat': float, 'lon': float} coordinates forming boundary polygon.
|
||||
Empty list if boundary cannot be computed.
|
||||
"""
|
||||
try:
|
||||
from shapely.geometry import MultiPoint
|
||||
from shapely import concave_hull
|
||||
except ImportError:
|
||||
logger.warning("Shapely not installed - boundary calculation disabled")
|
||||
return []
|
||||
|
||||
# Filter points above threshold
|
||||
valid_coords = [
|
||||
(p['lon'], p['lat']) # Shapely uses (x, y) = (lon, lat)
|
||||
for p in points
|
||||
if p.get('rsrp', -999) >= threshold_dbm
|
||||
]
|
||||
|
||||
if len(valid_coords) < 3:
|
||||
logger.debug(f"Not enough points for boundary: {len(valid_coords)}")
|
||||
return []
|
||||
|
||||
try:
|
||||
# Create MultiPoint geometry
|
||||
mp = MultiPoint(valid_coords)
|
||||
|
||||
# Compute concave hull (alpha shape)
|
||||
# ratio: 0 = convex hull, 1 = very tight fit
|
||||
# 0.3-0.5 gives good balance between detail and smoothness
|
||||
hull = concave_hull(mp, ratio=0.3)
|
||||
|
||||
if hull.is_empty:
|
||||
logger.debug("Concave hull is empty")
|
||||
return []
|
||||
|
||||
# Simplify to reduce points (0.001 deg ≈ 100m)
|
||||
if simplify_tolerance > 0:
|
||||
hull = hull.simplify(simplify_tolerance, preserve_topology=True)
|
||||
|
||||
# Extract coordinates based on geometry type
|
||||
if hull.geom_type == 'Polygon':
|
||||
coords = list(hull.exterior.coords)
|
||||
return [{'lat': c[1], 'lon': c[0]} for c in coords]
|
||||
|
||||
elif hull.geom_type == 'MultiPolygon':
|
||||
# Return largest polygon's exterior
|
||||
largest = max(hull.geoms, key=lambda g: g.area)
|
||||
coords = list(largest.exterior.coords)
|
||||
return [{'lat': c[1], 'lon': c[0]} for c in coords]
|
||||
|
||||
elif hull.geom_type == 'GeometryCollection':
|
||||
# Find polygons in collection
|
||||
polygons = [g for g in hull.geoms if g.geom_type == 'Polygon']
|
||||
if polygons:
|
||||
largest = max(polygons, key=lambda g: g.area)
|
||||
coords = list(largest.exterior.coords)
|
||||
return [{'lat': c[1], 'lon': c[0]} for c in coords]
|
||||
|
||||
logger.debug(f"Unexpected hull geometry type: {hull.geom_type}")
|
||||
return []
|
||||
|
||||
except Exception as e:
|
||||
logger.warning(f"Boundary calculation error: {e}")
|
||||
return []
|
||||
|
||||
|
||||
def calculate_multi_site_boundaries(
|
||||
points: list[dict],
|
||||
threshold_dbm: float = -100,
|
||||
) -> dict[str, list[dict]]:
|
||||
"""
|
||||
Calculate separate boundaries for each site's coverage area.
|
||||
|
||||
Args:
|
||||
points: Coverage points with 'lat', 'lon', 'rsrp', 'site_id' keys
|
||||
threshold_dbm: RSRP threshold
|
||||
|
||||
Returns:
|
||||
Dict mapping site_id to boundary coordinates list.
|
||||
"""
|
||||
# Group points by site_id
|
||||
by_site: dict[str, list[dict]] = {}
|
||||
for p in points:
|
||||
site_id = p.get('site_id', 'default')
|
||||
if site_id not in by_site:
|
||||
by_site[site_id] = []
|
||||
by_site[site_id].append(p)
|
||||
|
||||
# Calculate boundary for each site
|
||||
boundaries = {}
|
||||
for site_id, site_points in by_site.items():
|
||||
boundary = calculate_coverage_boundary(site_points, threshold_dbm)
|
||||
if boundary:
|
||||
boundaries[site_id] = boundary
|
||||
|
||||
return boundaries
|
||||
@@ -1,5 +1,6 @@
|
||||
import os
|
||||
import re
|
||||
import asyncio
|
||||
import httpx
|
||||
import json
|
||||
from typing import List, Optional
|
||||
@@ -90,7 +91,10 @@ class BuildingsService:
|
||||
OpenStreetMap buildings via Overpass API with local caching.
|
||||
"""
|
||||
|
||||
OVERPASS_URL = "https://overpass-api.de/api/interpreter"
|
||||
OVERPASS_URLS = [
|
||||
"https://overpass-api.de/api/interpreter",
|
||||
"https://overpass.kumi.systems/api/interpreter",
|
||||
]
|
||||
DEFAULT_LEVEL_HEIGHT = 3.0 # meters per floor
|
||||
DEFAULT_BUILDING_HEIGHT = 9.0 # 3 floors if unknown
|
||||
|
||||
@@ -152,7 +156,7 @@ class BuildingsService:
|
||||
self._memory_cache[bbox_key] = buildings
|
||||
return buildings
|
||||
|
||||
# Fetch from Overpass API
|
||||
# Fetch from Overpass API with retry
|
||||
print(f"[Buildings] Fetching from Overpass API...")
|
||||
|
||||
query = f"""
|
||||
@@ -166,17 +170,26 @@ class BuildingsService:
|
||||
out skel qt;
|
||||
"""
|
||||
|
||||
try:
|
||||
async with httpx.AsyncClient(timeout=60.0) as client:
|
||||
response = await client.post(
|
||||
self.OVERPASS_URL,
|
||||
data={"data": query}
|
||||
)
|
||||
response.raise_for_status()
|
||||
data = response.json()
|
||||
except Exception as e:
|
||||
print(f"[Buildings] Overpass API error: {e}")
|
||||
return []
|
||||
data = None
|
||||
max_retries = 3
|
||||
for attempt in range(max_retries):
|
||||
url = self.OVERPASS_URLS[attempt % len(self.OVERPASS_URLS)]
|
||||
try:
|
||||
timeout = 60.0 * (attempt + 1) # 60s, 120s, 180s
|
||||
async with httpx.AsyncClient(timeout=timeout) as client:
|
||||
response = await client.post(url, data={"data": query})
|
||||
response.raise_for_status()
|
||||
data = response.json()
|
||||
break
|
||||
except Exception as e:
|
||||
print(f"[Buildings] Overpass attempt {attempt + 1}/{max_retries} failed ({url}): {e}")
|
||||
if attempt < max_retries - 1:
|
||||
wait_time = 2 ** attempt # 1s, 2s
|
||||
print(f"[Buildings] Retrying in {wait_time}s...")
|
||||
await asyncio.sleep(wait_time)
|
||||
else:
|
||||
print(f"[Buildings] All {max_retries} attempts failed")
|
||||
return []
|
||||
|
||||
buildings = self._parse_overpass_response(data)
|
||||
|
||||
|
||||
250
backend/app/services/cache.py
Normal file
250
backend/app/services/cache.py
Normal file
@@ -0,0 +1,250 @@
|
||||
"""
|
||||
Unified cache management for RFCP services.
|
||||
|
||||
Provides a single interface for managing all cached data:
|
||||
- Terrain tiles (SRTM .hgt files, in-memory NumPy arrays)
|
||||
- OSM building data (disk JSON + in-memory)
|
||||
- Spatial index data
|
||||
|
||||
Tracks memory usage and enforces limits to prevent
|
||||
memory explosion during large-area calculations.
|
||||
"""
|
||||
|
||||
import os
|
||||
import sys
|
||||
import json
|
||||
import time
|
||||
import threading
|
||||
from pathlib import Path
|
||||
from typing import Optional, Dict, Any, Callable
|
||||
from datetime import datetime, timedelta
|
||||
|
||||
|
||||
class CacheEntry:
|
||||
"""Single cache entry with metadata."""
|
||||
__slots__ = ('value', 'created_at', 'last_accessed', 'size_bytes', 'hits')
|
||||
|
||||
def __init__(self, value: Any, size_bytes: int = 0):
|
||||
self.value = value
|
||||
self.created_at = time.monotonic()
|
||||
self.last_accessed = self.created_at
|
||||
self.size_bytes = size_bytes
|
||||
self.hits = 0
|
||||
|
||||
def touch(self):
|
||||
self.last_accessed = time.monotonic()
|
||||
self.hits += 1
|
||||
|
||||
|
||||
class MemoryCache:
|
||||
"""
|
||||
In-memory LRU cache with byte-level tracking.
|
||||
|
||||
Thread-safe. Evicts least-recently-used entries when
|
||||
max_size_bytes is exceeded.
|
||||
"""
|
||||
|
||||
def __init__(self, name: str, max_entries: int = 100, max_size_bytes: int = 500 * 1024 * 1024):
|
||||
self.name = name
|
||||
self.max_entries = max_entries
|
||||
self.max_size_bytes = max_size_bytes
|
||||
self._entries: Dict[str, CacheEntry] = {}
|
||||
self._lock = threading.Lock()
|
||||
self._total_bytes = 0
|
||||
self._total_hits = 0
|
||||
self._total_misses = 0
|
||||
|
||||
def get(self, key: str) -> Optional[Any]:
|
||||
with self._lock:
|
||||
entry = self._entries.get(key)
|
||||
if entry is None:
|
||||
self._total_misses += 1
|
||||
return None
|
||||
entry.touch()
|
||||
self._total_hits += 1
|
||||
return entry.value
|
||||
|
||||
def put(self, key: str, value: Any, size_bytes: int = 0):
|
||||
with self._lock:
|
||||
# Remove existing entry if present
|
||||
if key in self._entries:
|
||||
self._total_bytes -= self._entries[key].size_bytes
|
||||
del self._entries[key]
|
||||
|
||||
# Evict if over limits
|
||||
while (
|
||||
len(self._entries) >= self.max_entries
|
||||
or (self._total_bytes + size_bytes > self.max_size_bytes and self._entries)
|
||||
):
|
||||
self._evict_lru()
|
||||
|
||||
entry = CacheEntry(value, size_bytes)
|
||||
self._entries[key] = entry
|
||||
self._total_bytes += size_bytes
|
||||
|
||||
def remove(self, key: str) -> bool:
|
||||
with self._lock:
|
||||
entry = self._entries.pop(key, None)
|
||||
if entry:
|
||||
self._total_bytes -= entry.size_bytes
|
||||
return True
|
||||
return False
|
||||
|
||||
def clear(self):
|
||||
with self._lock:
|
||||
self._entries.clear()
|
||||
self._total_bytes = 0
|
||||
|
||||
def _evict_lru(self):
|
||||
"""Remove least-recently-used entry. Must hold _lock."""
|
||||
if not self._entries:
|
||||
return
|
||||
lru_key = min(self._entries, key=lambda k: self._entries[k].last_accessed)
|
||||
entry = self._entries.pop(lru_key)
|
||||
self._total_bytes -= entry.size_bytes
|
||||
|
||||
@property
|
||||
def size(self) -> int:
|
||||
return len(self._entries)
|
||||
|
||||
@property
|
||||
def size_bytes(self) -> int:
|
||||
return self._total_bytes
|
||||
|
||||
@property
|
||||
def size_mb(self) -> float:
|
||||
return self._total_bytes / (1024 * 1024)
|
||||
|
||||
def stats(self) -> dict:
|
||||
total = self._total_hits + self._total_misses
|
||||
return {
|
||||
"name": self.name,
|
||||
"entries": len(self._entries),
|
||||
"size_mb": round(self.size_mb, 1),
|
||||
"max_size_mb": round(self.max_size_bytes / (1024 * 1024), 1),
|
||||
"hits": self._total_hits,
|
||||
"misses": self._total_misses,
|
||||
"hit_rate": round(self._total_hits / total * 100, 1) if total > 0 else 0,
|
||||
}
|
||||
|
||||
|
||||
class DiskCache:
|
||||
"""
|
||||
Persistent disk cache with TTL expiry.
|
||||
|
||||
Used for OSM building data and other HTTP responses.
|
||||
"""
|
||||
|
||||
def __init__(self, name: str, base_path: Optional[Path] = None, ttl_days: int = 30):
|
||||
self.name = name
|
||||
self.ttl_days = ttl_days
|
||||
if base_path is None:
|
||||
base_path = Path(os.environ.get('RFCP_DATA_PATH', './data'))
|
||||
self.cache_path = base_path / 'cache' / name
|
||||
self.cache_path.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
def _key_to_file(self, key: str) -> Path:
|
||||
# Sanitize key for filesystem
|
||||
safe = key.replace('/', '_').replace('\\', '_').replace(':', '_')
|
||||
return self.cache_path / f"{safe}.json"
|
||||
|
||||
def get(self, key: str) -> Optional[Any]:
|
||||
path = self._key_to_file(key)
|
||||
if not path.exists():
|
||||
return None
|
||||
try:
|
||||
data = json.loads(path.read_text())
|
||||
cached_at = datetime.fromisoformat(data.get('_ts', '2000-01-01'))
|
||||
if datetime.now() - cached_at > timedelta(days=self.ttl_days):
|
||||
path.unlink(missing_ok=True)
|
||||
return None
|
||||
return data.get('v')
|
||||
except Exception:
|
||||
return None
|
||||
|
||||
def put(self, key: str, value: Any):
|
||||
path = self._key_to_file(key)
|
||||
try:
|
||||
path.write_text(json.dumps({
|
||||
'_ts': datetime.now().isoformat(),
|
||||
'v': value,
|
||||
}))
|
||||
except Exception as e:
|
||||
print(f"[DiskCache:{self.name}] Write error: {e}")
|
||||
|
||||
def remove(self, key: str) -> bool:
|
||||
path = self._key_to_file(key)
|
||||
if path.exists():
|
||||
path.unlink()
|
||||
return True
|
||||
return False
|
||||
|
||||
def clear(self):
|
||||
for f in self.cache_path.glob("*.json"):
|
||||
f.unlink(missing_ok=True)
|
||||
|
||||
def size_mb(self) -> float:
|
||||
total = sum(f.stat().st_size for f in self.cache_path.glob("*.json") if f.exists())
|
||||
return total / (1024 * 1024)
|
||||
|
||||
def stats(self) -> dict:
|
||||
files = list(self.cache_path.glob("*.json"))
|
||||
return {
|
||||
"name": self.name,
|
||||
"entries": len(files),
|
||||
"size_mb": round(self.size_mb(), 1),
|
||||
"ttl_days": self.ttl_days,
|
||||
}
|
||||
|
||||
|
||||
class CacheManager:
|
||||
"""
|
||||
Unified cache manager for all RFCP services.
|
||||
|
||||
Provides:
|
||||
- terrain: MemoryCache for SRTM tile arrays (~25MB each)
|
||||
- buildings: MemoryCache for building lists
|
||||
- spatial: MemoryCache for spatial index objects
|
||||
- osm_disk: DiskCache for OSM API responses
|
||||
"""
|
||||
|
||||
def __init__(self):
|
||||
self.terrain = MemoryCache(
|
||||
"terrain",
|
||||
max_entries=20, # ~500MB max (25MB per tile)
|
||||
max_size_bytes=500 * 1024 * 1024,
|
||||
)
|
||||
self.buildings = MemoryCache(
|
||||
"buildings",
|
||||
max_entries=50,
|
||||
max_size_bytes=200 * 1024 * 1024,
|
||||
)
|
||||
self.spatial = MemoryCache(
|
||||
"spatial_index",
|
||||
max_entries=50,
|
||||
max_size_bytes=100 * 1024 * 1024,
|
||||
)
|
||||
self.osm_disk = DiskCache("osm", ttl_days=30)
|
||||
|
||||
def clear_all(self):
|
||||
"""Clear all caches."""
|
||||
self.terrain.clear()
|
||||
self.buildings.clear()
|
||||
self.spatial.clear()
|
||||
self.osm_disk.clear()
|
||||
|
||||
def stats(self) -> dict:
|
||||
"""Get stats for all caches."""
|
||||
return {
|
||||
"terrain": self.terrain.stats(),
|
||||
"buildings": self.buildings.stats(),
|
||||
"spatial": self.spatial.stats(),
|
||||
"osm_disk": self.osm_disk.stats(),
|
||||
"total_memory_mb": round(
|
||||
self.terrain.size_mb + self.buildings.size_mb + self.spatial.size_mb, 1
|
||||
),
|
||||
}
|
||||
|
||||
|
||||
# Singleton
|
||||
cache_manager = CacheManager()
|
||||
241
backend/app/services/cache_db.py
Normal file
241
backend/app/services/cache_db.py
Normal file
@@ -0,0 +1,241 @@
|
||||
"""
|
||||
SQLite cache for OSM data — buildings, vegetation, water, streets.
|
||||
|
||||
Replaces in-memory caching for large-area calculations. Instead of holding
|
||||
hundreds of thousands of buildings in RAM, data is stored on disk in SQLite
|
||||
and queried per-tile using spatial bbox queries.
|
||||
|
||||
Location: ~/.rfcp/osm_cache.db
|
||||
"""
|
||||
|
||||
import json
|
||||
import time
|
||||
import sqlite3
|
||||
from pathlib import Path
|
||||
from typing import List, Dict, Optional
|
||||
|
||||
|
||||
def _default_db_path() -> str:
|
||||
"""Get default database path at ~/.rfcp/osm_cache.db."""
|
||||
cache_dir = Path.home() / '.rfcp'
|
||||
cache_dir.mkdir(parents=True, exist_ok=True)
|
||||
return str(cache_dir / 'osm_cache.db')
|
||||
|
||||
|
||||
class OSMCacheDB:
|
||||
"""SQLite-backed cache for OSM feature data with bbox queries.
|
||||
|
||||
Stores buildings and vegetation as JSON blobs with bounding-box
|
||||
columns for fast spatial queries. Cache freshness is tracked
|
||||
per 1-degree cell (matching the OSM grid fetch pattern).
|
||||
"""
|
||||
|
||||
def __init__(self, db_path: Optional[str] = None):
|
||||
if db_path is None:
|
||||
db_path = _default_db_path()
|
||||
self.db_path = db_path
|
||||
self._conn: Optional[sqlite3.Connection] = None
|
||||
|
||||
@property
|
||||
def conn(self) -> sqlite3.Connection:
|
||||
"""Lazy connection with WAL mode for concurrent reads."""
|
||||
if self._conn is None:
|
||||
self._conn = sqlite3.connect(self.db_path, check_same_thread=False)
|
||||
self._conn.execute("PRAGMA journal_mode=WAL")
|
||||
self._conn.execute("PRAGMA synchronous=NORMAL")
|
||||
self._init_tables()
|
||||
return self._conn
|
||||
|
||||
def _init_tables(self):
|
||||
assert self._conn is not None
|
||||
self._conn.executescript("""
|
||||
CREATE TABLE IF NOT EXISTS buildings (
|
||||
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||
osm_id INTEGER,
|
||||
min_lat REAL NOT NULL,
|
||||
min_lon REAL NOT NULL,
|
||||
max_lat REAL NOT NULL,
|
||||
max_lon REAL NOT NULL,
|
||||
height REAL DEFAULT 10.0,
|
||||
data TEXT NOT NULL,
|
||||
cell_key TEXT NOT NULL
|
||||
);
|
||||
CREATE INDEX IF NOT EXISTS idx_bld_cell ON buildings(cell_key);
|
||||
CREATE INDEX IF NOT EXISTS idx_bld_bbox
|
||||
ON buildings(min_lat, max_lat, min_lon, max_lon);
|
||||
|
||||
CREATE TABLE IF NOT EXISTS vegetation (
|
||||
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||
osm_id INTEGER,
|
||||
min_lat REAL NOT NULL,
|
||||
min_lon REAL NOT NULL,
|
||||
max_lat REAL NOT NULL,
|
||||
max_lon REAL NOT NULL,
|
||||
data TEXT NOT NULL,
|
||||
cell_key TEXT NOT NULL
|
||||
);
|
||||
CREATE INDEX IF NOT EXISTS idx_veg_cell ON vegetation(cell_key);
|
||||
CREATE INDEX IF NOT EXISTS idx_veg_bbox
|
||||
ON vegetation(min_lat, max_lat, min_lon, max_lon);
|
||||
|
||||
CREATE TABLE IF NOT EXISTS cache_meta (
|
||||
cell_key TEXT NOT NULL,
|
||||
data_type TEXT NOT NULL,
|
||||
fetched_at REAL NOT NULL,
|
||||
item_count INTEGER DEFAULT 0,
|
||||
PRIMARY KEY (cell_key, data_type)
|
||||
);
|
||||
""")
|
||||
self._conn.commit()
|
||||
|
||||
# ── Cell key helpers ──
|
||||
|
||||
@staticmethod
|
||||
def cell_key(min_lat: float, min_lon: float, max_lat: float, max_lon: float) -> str:
|
||||
"""Generate cell key from bbox (matches 1-degree grid alignment)."""
|
||||
return f"{min_lat:.0f},{min_lon:.0f},{max_lat:.0f},{max_lon:.0f}"
|
||||
|
||||
def is_cell_cached(
|
||||
self, cell_key: str, data_type: str, max_age_hours: float = 24.0
|
||||
) -> bool:
|
||||
"""Check if cell data is cached and fresh."""
|
||||
cursor = self.conn.execute(
|
||||
"SELECT fetched_at FROM cache_meta "
|
||||
"WHERE cell_key = ? AND data_type = ?",
|
||||
(cell_key, data_type),
|
||||
)
|
||||
row = cursor.fetchone()
|
||||
if row is None:
|
||||
return False
|
||||
age_hours = (time.time() - row[0]) / 3600
|
||||
return age_hours < max_age_hours
|
||||
|
||||
def mark_cell_cached(self, cell_key: str, data_type: str, item_count: int):
|
||||
"""Record that a cell has been fetched."""
|
||||
self.conn.execute(
|
||||
"INSERT OR REPLACE INTO cache_meta "
|
||||
"(cell_key, data_type, fetched_at, item_count) VALUES (?, ?, ?, ?)",
|
||||
(cell_key, data_type, time.time(), item_count),
|
||||
)
|
||||
self.conn.commit()
|
||||
|
||||
# ── Buildings ──
|
||||
|
||||
def insert_buildings_bulk(self, buildings_data: List[Dict], cell_key: str):
|
||||
"""Bulk insert serialised building dicts for a cell.
|
||||
|
||||
Each dict must have 'geometry' (list of [lon, lat]) and 'id'.
|
||||
"""
|
||||
rows = []
|
||||
for b in buildings_data:
|
||||
geom = b.get('geometry', [])
|
||||
if not geom:
|
||||
continue
|
||||
lats = [p[1] for p in geom]
|
||||
lons = [p[0] for p in geom]
|
||||
rows.append((
|
||||
b.get('id', 0),
|
||||
min(lats), min(lons), max(lats), max(lons),
|
||||
b.get('height', 10.0),
|
||||
json.dumps(b),
|
||||
cell_key,
|
||||
))
|
||||
|
||||
if rows:
|
||||
self.conn.executemany(
|
||||
"INSERT INTO buildings "
|
||||
"(osm_id, min_lat, min_lon, max_lat, max_lon, height, data, cell_key) "
|
||||
"VALUES (?, ?, ?, ?, ?, ?, ?, ?)",
|
||||
rows,
|
||||
)
|
||||
self.conn.commit()
|
||||
|
||||
def query_buildings_bbox(
|
||||
self,
|
||||
min_lat: float, max_lat: float,
|
||||
min_lon: float, max_lon: float,
|
||||
limit: int = 20000,
|
||||
) -> List[Dict]:
|
||||
"""Query buildings whose bbox overlaps the given bbox."""
|
||||
cursor = self.conn.execute(
|
||||
"SELECT data FROM buildings "
|
||||
"WHERE max_lat >= ? AND min_lat <= ? "
|
||||
"AND max_lon >= ? AND min_lon <= ? "
|
||||
"LIMIT ?",
|
||||
(min_lat, max_lat, min_lon, max_lon, limit),
|
||||
)
|
||||
return [json.loads(row[0]) for row in cursor]
|
||||
|
||||
# ── Vegetation ──
|
||||
|
||||
def insert_vegetation_bulk(self, veg_data: List[Dict], cell_key: str):
|
||||
"""Bulk insert serialised vegetation dicts for a cell."""
|
||||
rows = []
|
||||
for v in veg_data:
|
||||
geom = v.get('geometry', [])
|
||||
if not geom:
|
||||
continue
|
||||
lats = [p[1] for p in geom]
|
||||
lons = [p[0] for p in geom]
|
||||
rows.append((
|
||||
v.get('id', 0),
|
||||
min(lats), min(lons), max(lats), max(lons),
|
||||
json.dumps(v),
|
||||
cell_key,
|
||||
))
|
||||
|
||||
if rows:
|
||||
self.conn.executemany(
|
||||
"INSERT INTO vegetation "
|
||||
"(osm_id, min_lat, min_lon, max_lat, max_lon, data, cell_key) "
|
||||
"VALUES (?, ?, ?, ?, ?, ?, ?)",
|
||||
rows,
|
||||
)
|
||||
self.conn.commit()
|
||||
|
||||
def query_vegetation_bbox(
|
||||
self,
|
||||
min_lat: float, max_lat: float,
|
||||
min_lon: float, max_lon: float,
|
||||
limit: int = 10000,
|
||||
) -> List[Dict]:
|
||||
"""Query vegetation whose bbox overlaps the given bbox."""
|
||||
cursor = self.conn.execute(
|
||||
"SELECT data FROM vegetation "
|
||||
"WHERE max_lat >= ? AND min_lat <= ? "
|
||||
"AND max_lon >= ? AND min_lon <= ? "
|
||||
"LIMIT ?",
|
||||
(min_lat, max_lat, min_lon, max_lon, limit),
|
||||
)
|
||||
return [json.loads(row[0]) for row in cursor]
|
||||
|
||||
# ── Housekeeping ──
|
||||
|
||||
def close(self):
|
||||
"""Close the database connection."""
|
||||
if self._conn:
|
||||
self._conn.close()
|
||||
self._conn = None
|
||||
|
||||
def get_stats(self) -> Dict[str, int]:
|
||||
"""Get cache statistics."""
|
||||
stats: Dict[str, int] = {}
|
||||
for table in ('buildings', 'vegetation'):
|
||||
cursor = self.conn.execute(f"SELECT COUNT(*) FROM {table}") # noqa: S608
|
||||
stats[table] = cursor.fetchone()[0]
|
||||
cursor = self.conn.execute("SELECT COUNT(*) FROM cache_meta")
|
||||
stats['cached_cells'] = cursor.fetchone()[0]
|
||||
return stats
|
||||
|
||||
|
||||
# ── Singleton ──
|
||||
|
||||
_cache_db: Optional[OSMCacheDB] = None
|
||||
|
||||
|
||||
def get_osm_cache_db() -> OSMCacheDB:
|
||||
"""Get or create the singleton OSM cache database."""
|
||||
global _cache_db
|
||||
if _cache_db is None:
|
||||
_cache_db = OSMCacheDB()
|
||||
return _cache_db
|
||||
File diff suppressed because it is too large
Load Diff
@@ -1,15 +1,53 @@
|
||||
import time
|
||||
import numpy as np
|
||||
from typing import List, Tuple, Optional, TYPE_CHECKING
|
||||
from enum import Enum
|
||||
from typing import List, Tuple, Optional, Dict, Any, TYPE_CHECKING
|
||||
from dataclasses import dataclass
|
||||
from app.services.terrain_service import terrain_service
|
||||
from app.services.buildings_service import buildings_service, Building
|
||||
from app.services.materials_service import materials_service, BuildingMaterial
|
||||
from app.services.geometry_vectorized import (
|
||||
points_to_local_coords,
|
||||
line_intersects_polygons_batch,
|
||||
find_best_reflection_path_vectorized,
|
||||
)
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from app.services.spatial_index import SpatialIndex
|
||||
|
||||
|
||||
# ── Level of Detail (LOD) for dominant path calculations ──
|
||||
|
||||
class LODLevel(Enum):
|
||||
"""Distance-based level of detail for dominant path analysis.
|
||||
|
||||
At long distances, building-level multipath contributes minimally
|
||||
to path loss — macro propagation models suffice.
|
||||
"""
|
||||
NONE = "none" # Skip dominant path entirely
|
||||
SIMPLIFIED = "simplified" # Check only nearest few buildings
|
||||
FULL = "full" # Full calculation (current behavior)
|
||||
|
||||
|
||||
# LOD distance thresholds (meters)
|
||||
LOD_THRESHOLD_NONE = 3000 # >3km: skip dominant path
|
||||
LOD_THRESHOLD_SIMPLIFIED = 1500 # 1.5-3km: simplified mode
|
||||
|
||||
# Simplified mode limits
|
||||
SIMPLIFIED_MAX_BUILDINGS = 5
|
||||
SIMPLIFIED_MAX_WALLS = 50
|
||||
|
||||
|
||||
def get_lod_level(distance_m: float) -> LODLevel:
|
||||
"""Determine LOD level based on TX-RX distance."""
|
||||
if distance_m > LOD_THRESHOLD_NONE:
|
||||
return LODLevel.NONE
|
||||
elif distance_m > LOD_THRESHOLD_SIMPLIFIED:
|
||||
return LODLevel.SIMPLIFIED
|
||||
else:
|
||||
return LODLevel.FULL
|
||||
|
||||
|
||||
@dataclass
|
||||
class RayPath:
|
||||
"""Single ray path from TX to RX"""
|
||||
@@ -21,9 +59,9 @@ class RayPath:
|
||||
is_valid: bool # Does this path exist?
|
||||
|
||||
|
||||
MAX_BUILDINGS_FOR_LINE = 50
|
||||
MAX_BUILDINGS_FOR_REFLECTION = 30
|
||||
MAX_DISTANCE_FROM_PATH = 300 # meters
|
||||
MAX_BUILDINGS_FOR_LINE = 30
|
||||
MAX_BUILDINGS_FOR_REFLECTION = 20
|
||||
MAX_DISTANCE_FROM_PATH = 200 # meters
|
||||
|
||||
|
||||
def _filter_buildings_by_distance(buildings, tx_point, rx_point, max_count=100, max_distance=500):
|
||||
@@ -60,6 +98,258 @@ def _filter_buildings_by_distance(buildings, tx_point, rx_point, max_count=100,
|
||||
return filtered[:max_count]
|
||||
|
||||
|
||||
# ── Vectorized dominant path (NumPy) ──
|
||||
|
||||
_vec_log_count = 0
|
||||
|
||||
|
||||
def _buildings_to_arrays(buildings: List[Building], ref_lat: float, ref_lon: float):
|
||||
"""Convert Building objects to numpy arrays for vectorized geometry.
|
||||
|
||||
Returns:
|
||||
walls_start: (W, 2) wall start points in local XY meters
|
||||
walls_end: (W, 2) wall end points in local XY meters
|
||||
wall_to_building: (W,) mapping wall index -> building index
|
||||
poly_x: flattened polygon X coords
|
||||
poly_y: flattened polygon Y coords
|
||||
poly_lengths: (num_polygons,) vertices per polygon
|
||||
"""
|
||||
all_walls_start = []
|
||||
all_walls_end = []
|
||||
wall_to_building = []
|
||||
|
||||
all_poly_x = []
|
||||
all_poly_y = []
|
||||
poly_lengths = []
|
||||
|
||||
for i, b in enumerate(buildings):
|
||||
geom = b.geometry # [[lon, lat], ...]
|
||||
if not geom or len(geom) < 3:
|
||||
poly_lengths.append(0)
|
||||
continue
|
||||
|
||||
poly_lats = np.array([p[1] for p in geom])
|
||||
poly_lons = np.array([p[0] for p in geom])
|
||||
px, py = points_to_local_coords(ref_lat, ref_lon, poly_lats, poly_lons)
|
||||
|
||||
all_poly_x.extend(px)
|
||||
all_poly_y.extend(py)
|
||||
poly_lengths.append(len(geom))
|
||||
|
||||
# Extract wall segments
|
||||
for j in range(len(geom) - 1):
|
||||
all_walls_start.append([px[j], py[j]])
|
||||
all_walls_end.append([px[j + 1], py[j + 1]])
|
||||
wall_to_building.append(i)
|
||||
|
||||
return (
|
||||
np.array(all_walls_start) if all_walls_start else np.zeros((0, 2)),
|
||||
np.array(all_walls_end) if all_walls_end else np.zeros((0, 2)),
|
||||
np.array(wall_to_building, dtype=int) if wall_to_building else np.zeros(0, dtype=int),
|
||||
np.array(all_poly_x) if all_poly_x else np.zeros(0),
|
||||
np.array(all_poly_y) if all_poly_y else np.zeros(0),
|
||||
np.array(poly_lengths, dtype=int),
|
||||
)
|
||||
|
||||
|
||||
def find_dominant_paths_vectorized(
|
||||
tx_lat: float, tx_lon: float, tx_height: float,
|
||||
rx_lat: float, rx_lon: float, rx_height: float,
|
||||
frequency_mhz: float,
|
||||
buildings: List[Building],
|
||||
spatial_idx: 'Optional[SpatialIndex]' = None,
|
||||
) -> Dict[str, Any]:
|
||||
"""Vectorized dominant path finding using NumPy batch operations.
|
||||
|
||||
Replaces the loop-based find_dominant_paths_sync() with:
|
||||
1. Batch building-to-array conversion
|
||||
2. Vectorized LOS polygon intersection check
|
||||
3. Vectorized reflection point calculation
|
||||
4. Simplified diffraction estimate
|
||||
|
||||
Callers should pass pre-filtered buildings and spatial_idx=None
|
||||
to avoid redundant spatial queries (coverage_service already filters).
|
||||
|
||||
Returns dict with:
|
||||
has_los, path_type, total_loss, path_length, reflection_point
|
||||
"""
|
||||
global _vec_log_count
|
||||
t_total = time.perf_counter()
|
||||
|
||||
# Fast path: no buildings at all → direct LOS, skip all numpy work
|
||||
has_spatial_data = spatial_idx is not None and spatial_idx._grid
|
||||
if not buildings and not has_spatial_data:
|
||||
return {
|
||||
'has_los': True,
|
||||
'path_type': 'direct',
|
||||
'total_loss': 0.0,
|
||||
'path_length': 0.0,
|
||||
'reflection_point': None,
|
||||
}
|
||||
|
||||
# Get nearby buildings via spatial index or use pre-filtered list
|
||||
t0 = time.perf_counter()
|
||||
if spatial_idx:
|
||||
line_buildings = spatial_idx.query_line(tx_lat, tx_lon, rx_lat, rx_lon)
|
||||
else:
|
||||
line_buildings = buildings
|
||||
t_query = time.perf_counter() - t0
|
||||
|
||||
# No nearby buildings along this line → direct LOS
|
||||
if not line_buildings:
|
||||
return {
|
||||
'has_los': True,
|
||||
'path_type': 'direct',
|
||||
'total_loss': 0.0,
|
||||
'path_length': 0.0,
|
||||
'reflection_point': None,
|
||||
}
|
||||
|
||||
t0 = time.perf_counter()
|
||||
line_buildings = _filter_buildings_by_distance(
|
||||
line_buildings,
|
||||
(tx_lat, tx_lon), (rx_lat, rx_lon),
|
||||
max_count=MAX_BUILDINGS_FOR_LINE,
|
||||
max_distance=MAX_DISTANCE_FROM_PATH,
|
||||
)
|
||||
t_filter = time.perf_counter() - t0
|
||||
|
||||
# Reference point for local coordinate system
|
||||
ref_lat = (tx_lat + rx_lat) / 2
|
||||
ref_lon = (tx_lon + rx_lon) / 2
|
||||
|
||||
# Convert TX/RX to local meters
|
||||
tx_xy = points_to_local_coords(ref_lat, ref_lon, np.array([tx_lat]), np.array([tx_lon]))
|
||||
rx_xy = points_to_local_coords(ref_lat, ref_lon, np.array([rx_lat]), np.array([rx_lon]))
|
||||
tx = np.array([tx_xy[0][0], tx_xy[1][0]])
|
||||
rx = np.array([rx_xy[0][0], rx_xy[1][0]])
|
||||
|
||||
direct_dist = np.linalg.norm(rx - tx)
|
||||
|
||||
# Convert buildings to arrays
|
||||
t0 = time.perf_counter()
|
||||
walls_start, walls_end, wall_to_bldg, poly_x, poly_y, poly_lengths = (
|
||||
_buildings_to_arrays(line_buildings, ref_lat, ref_lon)
|
||||
)
|
||||
t_arrays = time.perf_counter() - t0
|
||||
|
||||
# No buildings → direct LOS
|
||||
if len(poly_lengths) == 0 or np.all(poly_lengths < 3):
|
||||
return {
|
||||
'has_los': True,
|
||||
'path_type': 'direct',
|
||||
'total_loss': 0.0,
|
||||
'path_length': direct_dist,
|
||||
'reflection_point': None,
|
||||
}
|
||||
|
||||
# Step 1: Vectorized direct LOS check
|
||||
t0 = time.perf_counter()
|
||||
intersects, _ = line_intersects_polygons_batch(tx, rx, poly_x, poly_y, poly_lengths)
|
||||
t_los = time.perf_counter() - t0
|
||||
|
||||
if not np.any(intersects):
|
||||
t_total_ms = (time.perf_counter() - t_total) * 1000
|
||||
_vec_log_count += 1
|
||||
if _vec_log_count <= 10:
|
||||
print(
|
||||
f"[DP_TIMING] #{_vec_log_count} LOS_CLEAR "
|
||||
f"bldgs={len(line_buildings)} walls={len(walls_start)} "
|
||||
f"query={t_query*1000:.1f}ms filter={t_filter*1000:.1f}ms "
|
||||
f"arrays={t_arrays*1000:.1f}ms los={t_los*1000:.1f}ms "
|
||||
f"total={t_total_ms:.1f}ms",
|
||||
flush=True,
|
||||
)
|
||||
return {
|
||||
'has_los': True,
|
||||
'path_type': 'direct',
|
||||
'total_loss': 0.0,
|
||||
'path_length': direct_dist,
|
||||
'reflection_point': None,
|
||||
}
|
||||
|
||||
# Step 2: Vectorized reflection path finding
|
||||
# Reuse line buildings for reflection (no separate spatial query)
|
||||
t0 = time.perf_counter()
|
||||
if spatial_idx:
|
||||
mid_lat = (tx_lat + rx_lat) / 2
|
||||
mid_lon = (tx_lon + rx_lon) / 2
|
||||
refl_buildings = spatial_idx.query_point(mid_lat, mid_lon, buffer_cells=3)
|
||||
refl_buildings = _filter_buildings_by_distance(
|
||||
refl_buildings,
|
||||
(tx_lat, tx_lon), (rx_lat, rx_lon),
|
||||
max_count=MAX_BUILDINGS_FOR_REFLECTION,
|
||||
max_distance=MAX_DISTANCE_FROM_PATH,
|
||||
)
|
||||
# Merge line + reflection buildings (deduplicate by id)
|
||||
seen_ids = {b.id for b in line_buildings}
|
||||
merged = list(line_buildings)
|
||||
for b in refl_buildings:
|
||||
if b.id not in seen_ids:
|
||||
merged.append(b)
|
||||
seen_ids.add(b.id)
|
||||
r_walls_start, r_walls_end, r_wall_to_bldg, r_poly_x, r_poly_y, r_poly_lengths = (
|
||||
_buildings_to_arrays(merged, ref_lat, ref_lon)
|
||||
)
|
||||
else:
|
||||
r_walls_start, r_walls_end, r_wall_to_bldg = walls_start, walls_end, wall_to_bldg
|
||||
r_poly_x, r_poly_y, r_poly_lengths = poly_x, poly_y, poly_lengths
|
||||
t_refl_setup = time.perf_counter() - t0
|
||||
|
||||
t0 = time.perf_counter()
|
||||
refl_point, refl_length, refl_loss = find_best_reflection_path_vectorized(
|
||||
tx, rx,
|
||||
r_walls_start, r_walls_end, r_wall_to_bldg,
|
||||
r_poly_x, r_poly_y, r_poly_lengths,
|
||||
max_candidates=30,
|
||||
max_walls=100,
|
||||
max_los_checks=5,
|
||||
)
|
||||
t_refl_calc = time.perf_counter() - t0
|
||||
|
||||
t_total_ms = (time.perf_counter() - t_total) * 1000
|
||||
|
||||
# Diagnostic log for first few points
|
||||
_vec_log_count += 1
|
||||
if _vec_log_count <= 10:
|
||||
print(
|
||||
f"[DP_TIMING] #{_vec_log_count} LOS_BLOCKED "
|
||||
f"bldgs={len(line_buildings)} walls={len(walls_start)} "
|
||||
f"dist={direct_dist:.0f}m "
|
||||
f"query={t_query*1000:.1f}ms filter={t_filter*1000:.1f}ms "
|
||||
f"arrays={t_arrays*1000:.1f}ms los={t_los*1000:.1f}ms "
|
||||
f"refl_setup={t_refl_setup*1000:.1f}ms refl_calc={t_refl_calc*1000:.1f}ms "
|
||||
f"total={t_total_ms:.1f}ms",
|
||||
flush=True,
|
||||
)
|
||||
|
||||
if refl_point is not None:
|
||||
# Convert reflection point back to lat/lon
|
||||
cos_lat = np.cos(np.radians(ref_lat))
|
||||
refl_lat = ref_lat + refl_point[1] / 110540.0
|
||||
refl_lon = ref_lon + refl_point[0] / (111320.0 * cos_lat)
|
||||
|
||||
return {
|
||||
'has_los': False,
|
||||
'path_type': 'reflection',
|
||||
'total_loss': refl_loss,
|
||||
'path_length': refl_length,
|
||||
'reflection_point': (refl_lat, refl_lon),
|
||||
}
|
||||
|
||||
# Step 3: Diffraction fallback
|
||||
num_blocking = int(np.sum(intersects))
|
||||
diffraction_loss = 10.0 + 5.0 * min(num_blocking, 5)
|
||||
|
||||
return {
|
||||
'has_los': False,
|
||||
'path_type': 'diffraction',
|
||||
'total_loss': diffraction_loss,
|
||||
'path_length': direct_dist,
|
||||
'reflection_point': None,
|
||||
}
|
||||
|
||||
|
||||
class DominantPathService:
|
||||
"""
|
||||
Find dominant propagation paths (2-3 strongest)
|
||||
@@ -451,6 +741,19 @@ class DominantPathService:
|
||||
buildings: fallback list (only used if spatial_idx is None)
|
||||
spatial_idx: grid-based spatial index for fast local queries
|
||||
"""
|
||||
# Fast path: no buildings at all → direct LOS only
|
||||
has_spatial_data = spatial_idx is not None and spatial_idx._grid
|
||||
if not buildings and not has_spatial_data:
|
||||
distance = terrain_service.haversine_distance(tx_lat, tx_lon, rx_lat, rx_lon)
|
||||
return [RayPath(
|
||||
path_type="direct",
|
||||
total_distance=distance,
|
||||
path_loss=self._calculate_path_loss(distance, frequency_mhz, tx_height, rx_height),
|
||||
reflection_points=[],
|
||||
materials_crossed=[],
|
||||
is_valid=True,
|
||||
)]
|
||||
|
||||
paths = []
|
||||
|
||||
# Use spatial index to get only buildings along the TX→RX line
|
||||
|
||||
309
backend/app/services/geometry_vectorized.py
Normal file
309
backend/app/services/geometry_vectorized.py
Normal file
@@ -0,0 +1,309 @@
|
||||
"""
|
||||
Vectorized geometry operations using NumPy.
|
||||
|
||||
All functions operate on arrays, not single values.
|
||||
Provides 10-50x speedup over Python loops for batch geometry checks.
|
||||
"""
|
||||
|
||||
import numpy as np
|
||||
from typing import Tuple, Optional
|
||||
|
||||
EARTH_RADIUS = 6371000 # meters
|
||||
|
||||
|
||||
def haversine_batch(
|
||||
lat1: float, lon1: float,
|
||||
lats2: np.ndarray, lons2: np.ndarray,
|
||||
) -> np.ndarray:
|
||||
"""Distance from one point to many points (meters)."""
|
||||
lat1_rad = np.radians(lat1)
|
||||
lon1_rad = np.radians(lon1)
|
||||
lats2_rad = np.radians(lats2)
|
||||
lons2_rad = np.radians(lons2)
|
||||
|
||||
dlat = lats2_rad - lat1_rad
|
||||
dlon = lons2_rad - lon1_rad
|
||||
|
||||
a = np.sin(dlat / 2) ** 2 + np.cos(lat1_rad) * np.cos(lats2_rad) * np.sin(dlon / 2) ** 2
|
||||
c = 2 * np.arcsin(np.sqrt(a))
|
||||
|
||||
return EARTH_RADIUS * c
|
||||
|
||||
|
||||
def points_to_local_coords(
|
||||
ref_lat: float, ref_lon: float,
|
||||
lats: np.ndarray, lons: np.ndarray,
|
||||
) -> Tuple[np.ndarray, np.ndarray]:
|
||||
"""Convert lat/lon to local X/Y meters (equirectangular projection)."""
|
||||
cos_lat = np.cos(np.radians(ref_lat))
|
||||
x = (lons - ref_lon) * 111320.0 * cos_lat
|
||||
y = (lats - ref_lat) * 110540.0
|
||||
return x, y
|
||||
|
||||
|
||||
def line_segments_intersect_batch(
|
||||
p1: np.ndarray, p2: np.ndarray,
|
||||
segments_start: np.ndarray, segments_end: np.ndarray,
|
||||
) -> Tuple[np.ndarray, np.ndarray]:
|
||||
"""Check if line p1->p2 intersects with N segments.
|
||||
|
||||
Args:
|
||||
p1, p2: shape (2,)
|
||||
segments_start, segments_end: shape (N, 2)
|
||||
|
||||
Returns:
|
||||
intersects: bool array (N,)
|
||||
t_values: parameter along p1->p2 (N,)
|
||||
"""
|
||||
d = p2 - p1
|
||||
seg_d = segments_end - segments_start
|
||||
|
||||
cross = d[0] * seg_d[:, 1] - d[1] * seg_d[:, 0]
|
||||
|
||||
parallel_mask = np.abs(cross) < 1e-10
|
||||
cross_safe = np.where(parallel_mask, 1.0, cross)
|
||||
|
||||
dp = p1 - segments_start
|
||||
|
||||
t = (dp[:, 0] * seg_d[:, 1] - dp[:, 1] * seg_d[:, 0]) / cross_safe
|
||||
u = (dp[:, 0] * d[1] - dp[:, 1] * d[0]) / cross_safe
|
||||
|
||||
intersects = ~parallel_mask & (t >= 0) & (t <= 1) & (u >= 0) & (u <= 1)
|
||||
|
||||
return intersects, t
|
||||
|
||||
|
||||
def line_intersects_polygons_batch(
|
||||
p1: np.ndarray, p2: np.ndarray,
|
||||
polygons_x: np.ndarray, polygons_y: np.ndarray,
|
||||
polygon_lengths: np.ndarray,
|
||||
max_polygons: int = 30,
|
||||
) -> Tuple[np.ndarray, np.ndarray]:
|
||||
"""Check if line p1->p2 intersects multiple polygons.
|
||||
|
||||
Args:
|
||||
p1, p2: shape (2,)
|
||||
polygons_x, polygons_y: flattened vertex arrays
|
||||
polygon_lengths: vertices per polygon (num_polygons,)
|
||||
max_polygons: only check nearest N polygons (bbox pre-filter)
|
||||
|
||||
Returns:
|
||||
intersects: bool (num_polygons,)
|
||||
min_distances: distance to first hit (num_polygons,)
|
||||
"""
|
||||
num_polygons = len(polygon_lengths)
|
||||
|
||||
if num_polygons == 0:
|
||||
return np.array([], dtype=bool), np.array([])
|
||||
|
||||
intersects = np.zeros(num_polygons, dtype=bool)
|
||||
min_t = np.full(num_polygons, np.inf)
|
||||
|
||||
# Pre-filter: only check polygons whose first vertex is near the line bbox
|
||||
if num_polygons > max_polygons:
|
||||
buf = 50.0 # 50m buffer
|
||||
line_min_x = min(p1[0], p2[0]) - buf
|
||||
line_max_x = max(p1[0], p2[0]) + buf
|
||||
line_min_y = min(p1[1], p2[1]) - buf
|
||||
line_max_y = max(p1[1], p2[1]) + buf
|
||||
|
||||
nearby_mask = np.zeros(num_polygons, dtype=bool)
|
||||
vi = 0
|
||||
for i, length in enumerate(polygon_lengths):
|
||||
if length >= 3:
|
||||
cx = polygons_x[vi]
|
||||
cy = polygons_y[vi]
|
||||
if line_min_x <= cx <= line_max_x and line_min_y <= cy <= line_max_y:
|
||||
nearby_mask[i] = True
|
||||
vi += length
|
||||
|
||||
# Cap at max_polygons
|
||||
nearby_indices = np.where(nearby_mask)[0]
|
||||
if len(nearby_indices) > max_polygons:
|
||||
nearby_mask = np.zeros(num_polygons, dtype=bool)
|
||||
nearby_mask[nearby_indices[:max_polygons]] = True
|
||||
else:
|
||||
nearby_mask = np.ones(num_polygons, dtype=bool)
|
||||
|
||||
idx = 0
|
||||
for i, length in enumerate(polygon_lengths):
|
||||
if length < 3 or not nearby_mask[i]:
|
||||
idx += length
|
||||
continue
|
||||
|
||||
px = polygons_x[idx:idx + length]
|
||||
py = polygons_y[idx:idx + length]
|
||||
|
||||
starts = np.stack([px, py], axis=1)
|
||||
ends = np.stack([np.roll(px, -1), np.roll(py, -1)], axis=1)
|
||||
|
||||
edge_intersects, t_vals = line_segments_intersect_batch(p1, p2, starts, ends)
|
||||
|
||||
if np.any(edge_intersects):
|
||||
intersects[i] = True
|
||||
min_t[i] = np.min(t_vals[edge_intersects])
|
||||
|
||||
idx += length
|
||||
|
||||
line_length = np.linalg.norm(p2 - p1)
|
||||
min_distances = min_t * line_length
|
||||
|
||||
return intersects, min_distances
|
||||
|
||||
|
||||
def calculate_reflection_points_batch(
|
||||
tx: np.ndarray, rx: np.ndarray,
|
||||
wall_starts: np.ndarray, wall_ends: np.ndarray,
|
||||
) -> Tuple[np.ndarray, np.ndarray]:
|
||||
"""Calculate reflection points on N walls via mirror-image method.
|
||||
|
||||
Args:
|
||||
tx, rx: shape (2,)
|
||||
wall_starts, wall_ends: shape (N, 2)
|
||||
|
||||
Returns:
|
||||
reflection_points: (N, 2)
|
||||
valid: bool (N,)
|
||||
"""
|
||||
wall_vec = wall_ends - wall_starts
|
||||
wall_length = np.linalg.norm(wall_vec, axis=1, keepdims=True)
|
||||
wall_unit = wall_vec / np.maximum(wall_length, 1e-10)
|
||||
|
||||
normals = np.stack([-wall_unit[:, 1], wall_unit[:, 0]], axis=1)
|
||||
|
||||
tx_to_wall = tx - wall_starts
|
||||
tx_dist_to_wall = np.sum(tx_to_wall * normals, axis=1, keepdims=True)
|
||||
tx_mirror = tx - 2 * tx_dist_to_wall * normals
|
||||
|
||||
rx_to_mirror = tx_mirror - rx
|
||||
|
||||
cross_denom = (rx_to_mirror[:, 0] * wall_vec[:, 1] -
|
||||
rx_to_mirror[:, 1] * wall_vec[:, 0])
|
||||
|
||||
valid_denom = np.abs(cross_denom) > 1e-10
|
||||
cross_denom_safe = np.where(valid_denom, cross_denom, 1.0)
|
||||
|
||||
rx_to_start = wall_starts - rx
|
||||
t = (rx_to_start[:, 0] * rx_to_mirror[:, 1] -
|
||||
rx_to_start[:, 1] * rx_to_mirror[:, 0]) / cross_denom_safe
|
||||
|
||||
reflection_points = wall_starts + t[:, np.newaxis] * wall_vec
|
||||
|
||||
valid = valid_denom & (t >= 0) & (t <= 1) & (tx_dist_to_wall[:, 0] > 0)
|
||||
|
||||
return reflection_points, valid
|
||||
|
||||
|
||||
def find_best_reflection_path_vectorized(
|
||||
tx: np.ndarray, rx: np.ndarray,
|
||||
building_walls_start: np.ndarray,
|
||||
building_walls_end: np.ndarray,
|
||||
wall_to_building: np.ndarray,
|
||||
obstacle_polygons_x: np.ndarray,
|
||||
obstacle_polygons_y: np.ndarray,
|
||||
obstacle_lengths: np.ndarray,
|
||||
max_candidates: int = 50,
|
||||
max_walls: int = 100,
|
||||
max_los_checks: int = 10,
|
||||
) -> Tuple[Optional[np.ndarray], float, float]:
|
||||
"""Find best single-reflection path using vectorized ops.
|
||||
|
||||
Args:
|
||||
max_walls: Only consider closest N walls for reflection candidates.
|
||||
max_los_checks: Only verify LOS for top N shortest reflection paths.
|
||||
|
||||
Returns:
|
||||
best_reflection_point: (2,) or None
|
||||
best_path_length: meters
|
||||
best_reflection_loss: dB
|
||||
"""
|
||||
num_walls = len(building_walls_start)
|
||||
if num_walls == 0:
|
||||
return None, np.inf, 0.0
|
||||
|
||||
# Limit walls by distance to path midpoint
|
||||
if num_walls > max_walls:
|
||||
midpoint = (tx + rx) / 2
|
||||
wall_midpoints = (building_walls_start + building_walls_end) / 2
|
||||
wall_distances = np.linalg.norm(wall_midpoints - midpoint, axis=1)
|
||||
closest = np.argpartition(wall_distances, max_walls)[:max_walls]
|
||||
building_walls_start = building_walls_start[closest]
|
||||
building_walls_end = building_walls_end[closest]
|
||||
wall_to_building = wall_to_building[closest]
|
||||
|
||||
refl_points, valid = calculate_reflection_points_batch(
|
||||
tx, rx, building_walls_start, building_walls_end,
|
||||
)
|
||||
|
||||
if not np.any(valid):
|
||||
return None, np.inf, 0.0
|
||||
|
||||
valid_indices = np.where(valid)[0]
|
||||
valid_refl = refl_points[valid]
|
||||
|
||||
tx_to_refl = np.linalg.norm(valid_refl - tx, axis=1)
|
||||
refl_to_rx = np.linalg.norm(rx - valid_refl, axis=1)
|
||||
path_lengths = tx_to_refl + refl_to_rx
|
||||
|
||||
# Direct distance filter: skip if reflection path > 2x direct
|
||||
direct_dist = np.linalg.norm(rx - tx)
|
||||
within_range = path_lengths <= direct_dist * 2.0
|
||||
if not np.any(within_range):
|
||||
return None, np.inf, 0.0
|
||||
|
||||
valid_indices = valid_indices[within_range]
|
||||
valid_refl = valid_refl[within_range]
|
||||
path_lengths = path_lengths[within_range]
|
||||
|
||||
# Keep top candidates by shortest path
|
||||
if len(valid_indices) > max_candidates:
|
||||
top_idx = np.argpartition(path_lengths, max_candidates)[:max_candidates]
|
||||
valid_indices = valid_indices[top_idx]
|
||||
valid_refl = valid_refl[top_idx]
|
||||
path_lengths = path_lengths[top_idx]
|
||||
|
||||
# Sort by path length for early exit
|
||||
sort_order = np.argsort(path_lengths)
|
||||
valid_refl = valid_refl[sort_order]
|
||||
path_lengths = path_lengths[sort_order]
|
||||
|
||||
# Check LOS only for top N shortest candidates
|
||||
check_count = min(len(valid_refl), max_los_checks)
|
||||
best_idx = -1
|
||||
best_length = np.inf
|
||||
|
||||
for i in range(check_count):
|
||||
length = path_lengths[i]
|
||||
if length >= best_length:
|
||||
continue
|
||||
|
||||
refl_pt = valid_refl[i]
|
||||
|
||||
# TX -> reflection LOS
|
||||
intersects1, _ = line_intersects_polygons_batch(
|
||||
tx, refl_pt, obstacle_polygons_x, obstacle_polygons_y, obstacle_lengths,
|
||||
)
|
||||
if np.any(intersects1):
|
||||
continue
|
||||
|
||||
# Reflection -> RX LOS
|
||||
intersects2, _ = line_intersects_polygons_batch(
|
||||
refl_pt, rx, obstacle_polygons_x, obstacle_polygons_y, obstacle_lengths,
|
||||
)
|
||||
if np.any(intersects2):
|
||||
continue
|
||||
|
||||
best_idx = i
|
||||
best_length = length
|
||||
break # sorted by length, first valid is best
|
||||
|
||||
if best_idx < 0:
|
||||
return None, np.inf, 0.0
|
||||
|
||||
best_point = valid_refl[best_idx]
|
||||
|
||||
# Reflection loss: 3-10 dB depending on path ratio
|
||||
path_ratio = best_length / max(direct_dist, 1.0)
|
||||
reflection_loss = 3.0 + 7.0 * min(1.0, (path_ratio - 1.0) * 2)
|
||||
|
||||
return best_point, best_length, reflection_loss
|
||||
275
backend/app/services/gpu_backend.py
Normal file
275
backend/app/services/gpu_backend.py
Normal file
@@ -0,0 +1,275 @@
|
||||
"""
|
||||
GPU Backend Manager — detects and manages compute backends.
|
||||
|
||||
Supports:
|
||||
- CUDA via CuPy
|
||||
- OpenCL via PyOpenCL (future)
|
||||
- CPU via NumPy (always available)
|
||||
|
||||
Usage:
|
||||
from app.services.gpu_backend import gpu_manager
|
||||
xp = gpu_manager.get_array_module() # cupy or numpy
|
||||
status = gpu_manager.get_status()
|
||||
"""
|
||||
|
||||
import logging
|
||||
from enum import Enum
|
||||
from dataclasses import dataclass, field
|
||||
from typing import Any, Optional
|
||||
|
||||
import numpy as np
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class GPUBackend(str, Enum):
|
||||
CUDA = "cuda"
|
||||
OPENCL = "opencl"
|
||||
CPU = "cpu"
|
||||
|
||||
|
||||
@dataclass
|
||||
class GPUDevice:
|
||||
backend: GPUBackend
|
||||
index: int
|
||||
name: str
|
||||
memory_mb: int
|
||||
extra: dict = field(default_factory=dict)
|
||||
|
||||
|
||||
class GPUManager:
|
||||
"""Singleton GPU manager with device detection and selection."""
|
||||
|
||||
def __init__(self):
|
||||
self._devices: list[GPUDevice] = []
|
||||
self._active_backend: GPUBackend = GPUBackend.CPU
|
||||
self._active_device: Optional[GPUDevice] = None
|
||||
self._cupy = None
|
||||
self._detect_devices()
|
||||
|
||||
def _detect_devices(self):
|
||||
"""Probe available GPU backends."""
|
||||
# Always add CPU
|
||||
cpu_device = GPUDevice(
|
||||
backend=GPUBackend.CPU,
|
||||
index=0,
|
||||
name="CPU (NumPy)",
|
||||
memory_mb=0,
|
||||
)
|
||||
self._devices.append(cpu_device)
|
||||
|
||||
# Try CuPy / CUDA
|
||||
try:
|
||||
import cupy as cp
|
||||
device_count = cp.cuda.runtime.getDeviceCount()
|
||||
for i in range(device_count):
|
||||
props = cp.cuda.runtime.getDeviceProperties(i)
|
||||
name = props["name"]
|
||||
if isinstance(name, bytes):
|
||||
name = name.decode()
|
||||
mem_mb = props["totalGlobalMem"] // (1024 * 1024)
|
||||
cuda_ver = cp.cuda.runtime.runtimeGetVersion()
|
||||
device = GPUDevice(
|
||||
backend=GPUBackend.CUDA,
|
||||
index=i,
|
||||
name=str(name),
|
||||
memory_mb=mem_mb,
|
||||
extra={"cuda_version": cuda_ver},
|
||||
)
|
||||
self._devices.append(device)
|
||||
logger.info(f"[GPU] CUDA device {i}: {name} ({mem_mb} MB)")
|
||||
if device_count > 0:
|
||||
self._cupy = cp
|
||||
except ImportError:
|
||||
logger.info("[GPU] CuPy not installed — CUDA unavailable")
|
||||
except Exception as e:
|
||||
logger.warning(f"[GPU] CuPy probe error: {e}")
|
||||
|
||||
# Try PyOpenCL (future — stub for detection only)
|
||||
try:
|
||||
import pyopencl as cl
|
||||
platforms = cl.get_platforms()
|
||||
for plat in platforms:
|
||||
for dev in plat.get_devices():
|
||||
mem_mb = dev.global_mem_size // (1024 * 1024)
|
||||
device = GPUDevice(
|
||||
backend=GPUBackend.OPENCL,
|
||||
index=len([d for d in self._devices if d.backend == GPUBackend.OPENCL]),
|
||||
name=dev.name.strip(),
|
||||
memory_mb=mem_mb,
|
||||
extra={"platform": plat.name.strip()},
|
||||
)
|
||||
self._devices.append(device)
|
||||
logger.info(f"[GPU] OpenCL device: {device.name} ({mem_mb} MB)")
|
||||
except ImportError:
|
||||
pass
|
||||
except Exception as e:
|
||||
logger.debug(f"[GPU] OpenCL probe error: {e}")
|
||||
|
||||
# Auto-select best: prefer CUDA > OpenCL > CPU
|
||||
cuda_devices = [d for d in self._devices if d.backend == GPUBackend.CUDA]
|
||||
if cuda_devices:
|
||||
self._active_backend = GPUBackend.CUDA
|
||||
self._active_device = cuda_devices[0]
|
||||
logger.info(f"[GPU] Active backend: CUDA — {self._active_device.name}")
|
||||
else:
|
||||
self._active_backend = GPUBackend.CPU
|
||||
self._active_device = cpu_device
|
||||
logger.info("[GPU] Active backend: CPU (NumPy)")
|
||||
|
||||
@property
|
||||
def gpu_available(self) -> bool:
|
||||
return self._active_backend != GPUBackend.CPU
|
||||
|
||||
def get_array_module(self) -> Any:
|
||||
"""Return cupy (if CUDA active) or numpy."""
|
||||
if self._active_backend == GPUBackend.CUDA and self._cupy is not None:
|
||||
return self._cupy
|
||||
return np
|
||||
|
||||
def to_cpu(self, arr: Any) -> np.ndarray:
|
||||
"""Transfer array to CPU numpy."""
|
||||
if hasattr(arr, 'get'):
|
||||
return arr.get()
|
||||
return np.asarray(arr)
|
||||
|
||||
def get_status(self) -> dict:
|
||||
"""Full status dict for API."""
|
||||
return {
|
||||
"active_backend": self._active_backend.value,
|
||||
"active_device": {
|
||||
"backend": self._active_device.backend.value,
|
||||
"index": self._active_device.index,
|
||||
"name": self._active_device.name,
|
||||
"memory_mb": self._active_device.memory_mb,
|
||||
} if self._active_device else None,
|
||||
"gpu_available": self.gpu_available,
|
||||
"available_devices": [
|
||||
{
|
||||
"backend": d.backend.value,
|
||||
"index": d.index,
|
||||
"name": d.name,
|
||||
"memory_mb": d.memory_mb,
|
||||
}
|
||||
for d in self._devices
|
||||
],
|
||||
}
|
||||
|
||||
def get_devices(self) -> list[dict]:
|
||||
"""Device list for API."""
|
||||
return [
|
||||
{
|
||||
"backend": d.backend.value,
|
||||
"index": d.index,
|
||||
"name": d.name,
|
||||
"memory_mb": d.memory_mb,
|
||||
}
|
||||
for d in self._devices
|
||||
]
|
||||
|
||||
def get_diagnostics(self) -> dict:
|
||||
"""Full diagnostic info for troubleshooting GPU detection."""
|
||||
import sys
|
||||
import platform
|
||||
import subprocess
|
||||
|
||||
is_wsl = "microsoft" in platform.release().lower()
|
||||
|
||||
diag = {
|
||||
"python_version": sys.version,
|
||||
"python_executable": sys.executable,
|
||||
"platform": platform.platform(),
|
||||
"is_wsl": is_wsl,
|
||||
"numpy": {"version": np.__version__},
|
||||
"cuda": {},
|
||||
"opencl": {},
|
||||
"nvidia_smi": None,
|
||||
"detected_devices": len(self._devices),
|
||||
"active_backend": self._active_backend.value,
|
||||
}
|
||||
|
||||
# Check nvidia-smi (works even without CuPy)
|
||||
try:
|
||||
result = subprocess.run(
|
||||
["nvidia-smi", "--query-gpu=name,memory.total,driver_version", "--format=csv,noheader"],
|
||||
capture_output=True, text=True, timeout=5
|
||||
)
|
||||
if result.returncode == 0 and result.stdout.strip():
|
||||
diag["nvidia_smi"] = result.stdout.strip()
|
||||
except Exception:
|
||||
diag["nvidia_smi"] = "not found or error"
|
||||
|
||||
# Check CuPy/CUDA
|
||||
try:
|
||||
import cupy as cp
|
||||
diag["cuda"]["cupy_version"] = cp.__version__
|
||||
diag["cuda"]["cuda_runtime_version"] = cp.cuda.runtime.runtimeGetVersion()
|
||||
diag["cuda"]["device_count"] = cp.cuda.runtime.getDeviceCount()
|
||||
for i in range(diag["cuda"]["device_count"]):
|
||||
props = cp.cuda.runtime.getDeviceProperties(i)
|
||||
name = props["name"]
|
||||
if isinstance(name, bytes):
|
||||
name = name.decode()
|
||||
diag["cuda"][f"device_{i}"] = {
|
||||
"name": str(name),
|
||||
"memory_mb": props["totalGlobalMem"] // (1024 * 1024),
|
||||
"compute_capability": f"{props['major']}.{props['minor']}",
|
||||
}
|
||||
except ImportError:
|
||||
diag["cuda"]["error"] = "CuPy not installed"
|
||||
if is_wsl:
|
||||
diag["cuda"]["install_hint"] = "pip3 install cupy-cuda12x --break-system-packages"
|
||||
else:
|
||||
diag["cuda"]["install_hint"] = "pip install cupy-cuda12x"
|
||||
except Exception as e:
|
||||
diag["cuda"]["error"] = str(e)
|
||||
|
||||
# Check PyOpenCL
|
||||
try:
|
||||
import pyopencl as cl
|
||||
diag["opencl"]["pyopencl_version"] = cl.VERSION_TEXT
|
||||
diag["opencl"]["platforms"] = []
|
||||
for p in cl.get_platforms():
|
||||
platform_info = {"name": p.name.strip(), "devices": []}
|
||||
for d in p.get_devices():
|
||||
platform_info["devices"].append({
|
||||
"name": d.name.strip(),
|
||||
"type": cl.device_type.to_string(d.type),
|
||||
"memory_mb": d.global_mem_size // (1024 * 1024),
|
||||
"compute_units": d.max_compute_units,
|
||||
})
|
||||
diag["opencl"]["platforms"].append(platform_info)
|
||||
except ImportError:
|
||||
diag["opencl"]["error"] = "PyOpenCL not installed"
|
||||
if is_wsl:
|
||||
diag["opencl"]["install_hint"] = "pip3 install pyopencl --break-system-packages"
|
||||
else:
|
||||
diag["opencl"]["install_hint"] = "pip install pyopencl"
|
||||
except Exception as e:
|
||||
diag["opencl"]["error"] = str(e)
|
||||
|
||||
return diag
|
||||
|
||||
def set_device(self, backend: str, index: int = 0) -> dict:
|
||||
"""Switch active compute device."""
|
||||
target_backend = GPUBackend(backend)
|
||||
candidates = [d for d in self._devices
|
||||
if d.backend == target_backend and d.index == index]
|
||||
if not candidates:
|
||||
raise ValueError(f"No device found: backend={backend}, index={index}")
|
||||
|
||||
self._active_device = candidates[0]
|
||||
self._active_backend = target_backend
|
||||
|
||||
if target_backend == GPUBackend.CUDA and self._cupy is not None:
|
||||
self._cupy.cuda.Device(index).use()
|
||||
|
||||
logger.info(f"[GPU] Switched to: {self._active_device.name} ({target_backend.value})")
|
||||
return {
|
||||
"backend": self._active_backend.value,
|
||||
"device": self._active_device.name,
|
||||
}
|
||||
|
||||
|
||||
# Singleton
|
||||
gpu_manager = GPUManager()
|
||||
@@ -3,7 +3,7 @@ GPU-accelerated computation service using CuPy.
|
||||
Falls back to NumPy when CuPy/CUDA is not available.
|
||||
|
||||
Provides vectorized batch operations for coverage calculation:
|
||||
- Haversine distance (site → all grid points)
|
||||
- Haversine distance (site -> all grid points)
|
||||
- Okumura-Hata path loss (all distances at once)
|
||||
|
||||
Usage:
|
||||
@@ -11,48 +11,29 @@ Usage:
|
||||
"""
|
||||
|
||||
import numpy as np
|
||||
from typing import Dict, Any, Optional
|
||||
from typing import Dict, Any
|
||||
|
||||
# ── Try CuPy import ──
|
||||
|
||||
GPU_AVAILABLE = False
|
||||
GPU_INFO: Optional[Dict[str, Any]] = None
|
||||
cp = None
|
||||
|
||||
try:
|
||||
import cupy as _cp
|
||||
device_count = _cp.cuda.runtime.getDeviceCount()
|
||||
if device_count > 0:
|
||||
cp = _cp
|
||||
GPU_AVAILABLE = True
|
||||
props = _cp.cuda.runtime.getDeviceProperties(0)
|
||||
GPU_INFO = {
|
||||
"name": props["name"].decode() if isinstance(props["name"], bytes) else str(props["name"]),
|
||||
"memory_mb": props["totalGlobalMem"] // (1024 * 1024),
|
||||
"cuda_version": _cp.cuda.runtime.runtimeGetVersion(),
|
||||
}
|
||||
print(f"[GPU] CUDA available: {GPU_INFO['name']} ({GPU_INFO['memory_mb']} MB)", flush=True)
|
||||
else:
|
||||
print("[GPU] No CUDA devices found", flush=True)
|
||||
except ImportError:
|
||||
print("[GPU] CuPy not installed — using CPU/NumPy", flush=True)
|
||||
print("[GPU] To enable GPU acceleration, install CuPy:", flush=True)
|
||||
print("[GPU] For CUDA 12.x: pip install cupy-cuda12x", flush=True)
|
||||
print("[GPU] For CUDA 11.x: pip install cupy-cuda11x", flush=True)
|
||||
print("[GPU] Check CUDA version: nvidia-smi", flush=True)
|
||||
except Exception as e:
|
||||
print(f"[GPU] CuPy error: {e} — GPU acceleration disabled", flush=True)
|
||||
from app.services.gpu_backend import gpu_manager
|
||||
|
||||
# Backward-compatible exports
|
||||
GPU_AVAILABLE = gpu_manager.gpu_available
|
||||
GPU_INFO: Dict[str, Any] | None = (
|
||||
{
|
||||
"name": gpu_manager._active_device.name,
|
||||
"memory_mb": gpu_manager._active_device.memory_mb,
|
||||
**gpu_manager._active_device.extra,
|
||||
}
|
||||
if gpu_manager.gpu_available and gpu_manager._active_device
|
||||
else None
|
||||
)
|
||||
|
||||
# Array module: cupy on GPU, numpy on CPU
|
||||
xp = cp if GPU_AVAILABLE else np
|
||||
xp = gpu_manager.get_array_module()
|
||||
|
||||
|
||||
def _to_cpu(arr):
|
||||
"""Transfer array to CPU numpy if on GPU."""
|
||||
if GPU_AVAILABLE and hasattr(arr, 'get'):
|
||||
return arr.get()
|
||||
return np.asarray(arr)
|
||||
return gpu_manager.to_cpu(arr)
|
||||
|
||||
|
||||
class GPUService:
|
||||
@@ -60,13 +41,13 @@ class GPUService:
|
||||
|
||||
@property
|
||||
def available(self) -> bool:
|
||||
return GPU_AVAILABLE
|
||||
return gpu_manager.gpu_available
|
||||
|
||||
def get_info(self) -> Dict[str, Any]:
|
||||
"""Return GPU info dict for system endpoint."""
|
||||
if not GPU_AVAILABLE:
|
||||
if not gpu_manager.gpu_available:
|
||||
return {"available": False, "name": None, "memory_mb": None}
|
||||
return {"available": True, **GPU_INFO}
|
||||
return {"available": True, **(GPU_INFO or {})}
|
||||
|
||||
def precompute_distances(
|
||||
self,
|
||||
@@ -79,16 +60,17 @@ class GPUService:
|
||||
|
||||
Returns distances in meters as a CPU numpy array.
|
||||
"""
|
||||
lat1 = xp.radians(xp.asarray(grid_lats, dtype=xp.float64))
|
||||
lon1 = xp.radians(xp.asarray(grid_lons, dtype=xp.float64))
|
||||
lat2 = xp.radians(xp.float64(site_lat))
|
||||
lon2 = xp.radians(xp.float64(site_lon))
|
||||
_xp = gpu_manager.get_array_module()
|
||||
lat1 = _xp.radians(_xp.asarray(grid_lats, dtype=_xp.float64))
|
||||
lon1 = _xp.radians(_xp.asarray(grid_lons, dtype=_xp.float64))
|
||||
lat2 = _xp.radians(_xp.float64(site_lat))
|
||||
lon2 = _xp.radians(_xp.float64(site_lon))
|
||||
|
||||
dlat = lat2 - lat1
|
||||
dlon = lon2 - lon1
|
||||
|
||||
a = xp.sin(dlat / 2) ** 2 + xp.cos(lat1) * xp.cos(lat2) * xp.sin(dlon / 2) ** 2
|
||||
c = 2 * xp.arcsin(xp.sqrt(a))
|
||||
a = _xp.sin(dlat / 2) ** 2 + _xp.cos(lat1) * _xp.cos(lat2) * _xp.sin(dlon / 2) ** 2
|
||||
c = 2 * _xp.arcsin(_xp.sqrt(a))
|
||||
|
||||
distances = 6371000.0 * c
|
||||
return _to_cpu(distances)
|
||||
@@ -99,28 +81,494 @@ class GPUService:
|
||||
frequency_mhz: float,
|
||||
tx_height: float,
|
||||
rx_height: float = 1.5,
|
||||
environment: str = "urban",
|
||||
) -> np.ndarray:
|
||||
"""Vectorized Okumura-Hata path loss for all distances.
|
||||
"""Vectorized path loss using the appropriate propagation model.
|
||||
|
||||
Selects model based on frequency (Phase 3.0 model selection), then
|
||||
applies the correct formula in a single vectorized numpy pass.
|
||||
|
||||
Returns path loss in dB as a CPU numpy array.
|
||||
"""
|
||||
d_arr = xp.asarray(distances, dtype=xp.float64)
|
||||
d_km = xp.maximum(d_arr / 1000.0, 0.1)
|
||||
_xp = gpu_manager.get_array_module()
|
||||
d_arr = _xp.asarray(distances, dtype=_xp.float64)
|
||||
d_km = _xp.maximum(d_arr / 1000.0, 0.1)
|
||||
|
||||
freq = float(frequency_mhz)
|
||||
h_tx = float(tx_height)
|
||||
h_rx = float(rx_height)
|
||||
h_tx = max(float(tx_height), 1.0)
|
||||
h_rx = max(float(rx_height), 1.0)
|
||||
|
||||
log_f = xp.log10(xp.float64(freq))
|
||||
log_hb = xp.log10(xp.float64(h_tx))
|
||||
log_f = _xp.log10(_xp.float64(freq))
|
||||
log_hb = _xp.log10(_xp.float64(max(h_tx, 1.0)))
|
||||
|
||||
a_hm = (1.1 * log_f - 0.7) * h_rx - (1.56 * log_f - 0.8)
|
||||
if freq > 2000:
|
||||
# Free-Space Path Loss: FSPL = 20*log10(d_km) + 20*log10(f) + 32.45
|
||||
L = 20.0 * _xp.log10(d_km) + 20.0 * log_f + 32.45
|
||||
|
||||
L = (69.55 + 26.16 * log_f - 13.82 * log_hb - a_hm
|
||||
+ (44.9 - 6.55 * log_hb) * xp.log10(d_km))
|
||||
elif freq > 1500:
|
||||
# COST-231 Hata: extends Okumura-Hata to 1500-2000 MHz
|
||||
a_hm = (1.1 * log_f - 0.7) * h_rx - (1.56 * log_f - 0.8)
|
||||
L = (46.3 + 33.9 * log_f - 13.82 * log_hb - a_hm
|
||||
+ (44.9 - 6.55 * log_hb) * _xp.log10(d_km))
|
||||
if environment == "urban":
|
||||
L += 3.0 # Metropolitan center correction
|
||||
|
||||
elif freq >= 150:
|
||||
# Okumura-Hata: 150-1500 MHz
|
||||
if environment == "urban" and freq >= 400:
|
||||
a_hm = 3.2 * (_xp.log10(11.75 * h_rx) ** 2) - 4.97
|
||||
else:
|
||||
a_hm = (1.1 * log_f - 0.7) * h_rx - (1.56 * log_f - 0.8)
|
||||
|
||||
L_urban = (69.55 + 26.16 * log_f - 13.82 * log_hb - a_hm
|
||||
+ (44.9 - 6.55 * log_hb) * _xp.log10(d_km))
|
||||
|
||||
if environment == "suburban":
|
||||
L = L_urban - 2 * (_xp.log10(freq / 28) ** 2) - 5.4
|
||||
elif environment == "rural":
|
||||
L = L_urban - 4.78 * (log_f ** 2) + 18.33 * log_f - 35.94
|
||||
elif environment == "open":
|
||||
L = L_urban - 4.78 * (log_f ** 2) + 18.33 * log_f - 40.94
|
||||
else:
|
||||
L = L_urban
|
||||
|
||||
else:
|
||||
# Very low frequency — Longley-Rice simplified (area mode)
|
||||
# Use FSPL as baseline with terrain roughness correction
|
||||
L = 20.0 * _xp.log10(d_km) + 20.0 * log_f + 32.45 + 10.0
|
||||
|
||||
return _to_cpu(L)
|
||||
|
||||
def batch_terrain_los(
|
||||
self,
|
||||
site_lat: float,
|
||||
site_lon: float,
|
||||
site_height: float,
|
||||
site_elevation: float,
|
||||
grid_lats: np.ndarray,
|
||||
grid_lons: np.ndarray,
|
||||
grid_elevations: np.ndarray,
|
||||
distances: np.ndarray,
|
||||
frequency_mhz: float,
|
||||
terrain_cache: dict,
|
||||
num_samples: int = 30,
|
||||
) -> tuple[np.ndarray, np.ndarray]:
|
||||
"""Batch compute terrain LOS and diffraction loss for all grid points.
|
||||
|
||||
This is the key GPU optimization — instead of sampling terrain profiles
|
||||
one point at a time, we sample ALL profiles in parallel using vectorized
|
||||
operations.
|
||||
|
||||
Args:
|
||||
site_lat, site_lon: Site coordinates
|
||||
site_height: Antenna height above ground (meters)
|
||||
site_elevation: Ground elevation at site (meters)
|
||||
grid_lats, grid_lons: All grid point coordinates
|
||||
grid_elevations: Ground elevation at each grid point
|
||||
distances: Pre-computed distances from site to each point (meters)
|
||||
frequency_mhz: Frequency for diffraction calculation
|
||||
terrain_cache: Dict[tile_name -> numpy array] from terrain_service
|
||||
num_samples: Number of samples per terrain profile
|
||||
|
||||
Returns:
|
||||
(has_los, terrain_loss) - both shape (N,)
|
||||
has_los: boolean array, True if clear line of sight
|
||||
terrain_loss: diffraction loss in dB (0 if has_los)
|
||||
"""
|
||||
_xp = gpu_manager.get_array_module()
|
||||
N = len(grid_lats)
|
||||
|
||||
if N == 0:
|
||||
return np.array([], dtype=bool), np.array([], dtype=np.float64)
|
||||
|
||||
# Convert inputs to GPU arrays
|
||||
g_lats = _xp.asarray(grid_lats, dtype=_xp.float64)
|
||||
g_lons = _xp.asarray(grid_lons, dtype=_xp.float64)
|
||||
g_elevs = _xp.asarray(grid_elevations, dtype=_xp.float64)
|
||||
g_dists = _xp.asarray(distances, dtype=_xp.float64)
|
||||
|
||||
# Heights
|
||||
tx_total = float(site_elevation + site_height)
|
||||
rx_height = 1.5 # Receiver height above ground
|
||||
|
||||
# Earth curvature constants
|
||||
EARTH_RADIUS = 6371000.0
|
||||
K_FACTOR = 4.0 / 3.0
|
||||
effective_radius = K_FACTOR * EARTH_RADIUS
|
||||
|
||||
# Sample terrain profiles for all points at once
|
||||
# Create sample positions: shape (N, num_samples)
|
||||
t = _xp.linspace(0, 1, num_samples, dtype=_xp.float64) # (S,)
|
||||
t = t.reshape(1, -1) # (1, S)
|
||||
|
||||
# Interpolate lat/lon for all sample points
|
||||
# sample_lats[i, j] = site_lat + t[j] * (grid_lats[i] - site_lat)
|
||||
dlat = g_lats.reshape(-1, 1) - site_lat # (N, 1)
|
||||
dlon = g_lons.reshape(-1, 1) - site_lon # (N, 1)
|
||||
sample_lats = site_lat + t * dlat # (N, S)
|
||||
sample_lons = site_lon + t * dlon # (N, S)
|
||||
|
||||
# Sample distances along path: shape (N, S)
|
||||
sample_dists = t * g_dists.reshape(-1, 1) # (N, S)
|
||||
|
||||
# Get terrain elevations for all samples
|
||||
# This is the tricky part - we need to look up from the tile cache
|
||||
# For GPU efficiency, we'll do this on CPU then transfer
|
||||
sample_lats_cpu = _to_cpu(sample_lats).flatten()
|
||||
sample_lons_cpu = _to_cpu(sample_lons).flatten()
|
||||
|
||||
# Batch elevation lookup from cache
|
||||
sample_elevs_cpu = self._batch_elevation_lookup(
|
||||
sample_lats_cpu, sample_lons_cpu, terrain_cache
|
||||
)
|
||||
sample_elevs = _xp.asarray(sample_elevs_cpu, dtype=_xp.float64).reshape(N, num_samples)
|
||||
|
||||
# Compute LOS line height at each sample point
|
||||
# Linear interpolation from tx to rx
|
||||
rx_total = g_elevs + rx_height # (N,)
|
||||
los_heights = tx_total + t * (rx_total.reshape(-1, 1) - tx_total) # (N, S)
|
||||
|
||||
# Earth curvature correction at each sample
|
||||
total_dist = g_dists.reshape(-1, 1) # (N, 1)
|
||||
d = sample_dists # (N, S)
|
||||
curvature = (d * (total_dist - d)) / (2 * effective_radius) # (N, S)
|
||||
los_heights_corrected = los_heights - curvature # (N, S)
|
||||
|
||||
# Clearance at each sample point
|
||||
clearances = los_heights_corrected - sample_elevs # (N, S)
|
||||
|
||||
# Minimum clearance per profile
|
||||
min_clearances = _xp.min(clearances, axis=1) # (N,)
|
||||
|
||||
# Has LOS if minimum clearance > 0
|
||||
has_los = min_clearances > 0 # (N,)
|
||||
|
||||
# Diffraction loss for points without LOS
|
||||
# Using simplified ITU-R P.526 formula
|
||||
terrain_loss = _xp.zeros(N, dtype=_xp.float64)
|
||||
|
||||
# Only compute diffraction where blocked
|
||||
blocked_mask = ~has_los
|
||||
blocked_clearances = min_clearances[blocked_mask]
|
||||
|
||||
if _xp.any(blocked_mask):
|
||||
# v = |clearance| / 10 (simplified Fresnel parameter)
|
||||
v = _xp.abs(blocked_clearances) / 10.0
|
||||
|
||||
# Diffraction loss formula from ITU-R P.526
|
||||
loss = _xp.where(
|
||||
v <= 0,
|
||||
_xp.zeros_like(v),
|
||||
_xp.where(
|
||||
v < 2.4,
|
||||
6.02 + 9.11 * v + 1.65 * v ** 2,
|
||||
12.95 + 20 * _xp.log10(v)
|
||||
)
|
||||
)
|
||||
# Cap at reasonable max
|
||||
loss = _xp.minimum(loss, 40.0)
|
||||
terrain_loss[blocked_mask] = loss
|
||||
|
||||
return _to_cpu(has_los).astype(bool), _to_cpu(terrain_loss)
|
||||
|
||||
def _batch_elevation_lookup(
|
||||
self,
|
||||
lats: np.ndarray,
|
||||
lons: np.ndarray,
|
||||
terrain_cache: dict,
|
||||
) -> np.ndarray:
|
||||
"""Look up elevations from cached terrain tiles with bilinear interpolation.
|
||||
|
||||
Vectorized implementation: processes per-tile (1-4 tiles) instead of
|
||||
per-point (thousands of points). Uses bilinear interpolation for
|
||||
sub-meter accuracy (vs 15m error with nearest-neighbor at 30m resolution).
|
||||
|
||||
Args:
|
||||
lats, lons: Flattened arrays of coordinates
|
||||
terrain_cache: Dict mapping tile_name -> numpy array
|
||||
|
||||
Returns:
|
||||
elevations: Same shape as input lats
|
||||
"""
|
||||
elevations = np.zeros(len(lats), dtype=np.float64)
|
||||
|
||||
# Vectorized tile identification
|
||||
lat_ints = np.floor(lats).astype(int)
|
||||
lon_ints = np.floor(lons).astype(int)
|
||||
|
||||
# Process per tile (usually 1-4 tiles, not per point)
|
||||
unique_tiles = set(zip(lat_ints, lon_ints))
|
||||
|
||||
for lat_int, lon_int in unique_tiles:
|
||||
lat_letter = 'N' if lat_int >= 0 else 'S'
|
||||
lon_letter = 'E' if lon_int >= 0 else 'W'
|
||||
tile_name = f"{lat_letter}{abs(lat_int):02d}{lon_letter}{abs(lon_int):03d}"
|
||||
|
||||
tile = terrain_cache.get(tile_name)
|
||||
if tile is None:
|
||||
continue
|
||||
|
||||
# Mask for points in this tile
|
||||
mask = (lat_ints == lat_int) & (lon_ints == lon_int)
|
||||
tile_lats = lats[mask]
|
||||
tile_lons = lons[mask]
|
||||
|
||||
size = tile.shape[0]
|
||||
|
||||
# Vectorized bilinear interpolation
|
||||
lat_frac = tile_lats - lat_int
|
||||
lon_frac = tile_lons - lon_int
|
||||
|
||||
row_exact = (1.0 - lat_frac) * (size - 1)
|
||||
col_exact = lon_frac * (size - 1)
|
||||
|
||||
r0 = np.clip(row_exact.astype(int), 0, size - 2)
|
||||
c0 = np.clip(col_exact.astype(int), 0, size - 2)
|
||||
r1 = r0 + 1
|
||||
c1 = c0 + 1
|
||||
|
||||
dr = row_exact - r0
|
||||
dc = col_exact - c0
|
||||
|
||||
# Get four corner values for all points at once
|
||||
z00 = tile[r0, c0].astype(np.float64)
|
||||
z01 = tile[r0, c1].astype(np.float64)
|
||||
z10 = tile[r1, c0].astype(np.float64)
|
||||
z11 = tile[r1, c1].astype(np.float64)
|
||||
|
||||
# Bilinear interpolation (vectorized)
|
||||
result = (z00 * (1 - dr) * (1 - dc) +
|
||||
z01 * (1 - dr) * dc +
|
||||
z10 * dr * (1 - dc) +
|
||||
z11 * dr * dc)
|
||||
|
||||
# Handle void values (-32768) - set to 0
|
||||
void_mask = (z00 == -32768) | (z01 == -32768) | (z10 == -32768) | (z11 == -32768)
|
||||
result[void_mask] = 0.0
|
||||
|
||||
elevations[mask] = result
|
||||
|
||||
return elevations
|
||||
|
||||
def batch_antenna_pattern(
|
||||
self,
|
||||
site_lat: float,
|
||||
site_lon: float,
|
||||
grid_lats: np.ndarray,
|
||||
grid_lons: np.ndarray,
|
||||
azimuth: float,
|
||||
beamwidth: float,
|
||||
) -> np.ndarray:
|
||||
"""Batch compute antenna pattern loss for all grid points.
|
||||
|
||||
Returns antenna_loss in dB, shape (N,)
|
||||
"""
|
||||
_xp = gpu_manager.get_array_module()
|
||||
N = len(grid_lats)
|
||||
|
||||
if N == 0 or azimuth is None or not beamwidth:
|
||||
return np.zeros(N, dtype=np.float64)
|
||||
|
||||
# Convert to radians
|
||||
lat1 = _xp.radians(_xp.float64(site_lat))
|
||||
lon1 = _xp.radians(_xp.float64(site_lon))
|
||||
lat2 = _xp.radians(_xp.asarray(grid_lats, dtype=_xp.float64))
|
||||
lon2 = _xp.radians(_xp.asarray(grid_lons, dtype=_xp.float64))
|
||||
|
||||
# Calculate bearing from site to each point
|
||||
dlon = lon2 - lon1
|
||||
x = _xp.sin(dlon) * _xp.cos(lat2)
|
||||
y = _xp.cos(lat1) * _xp.sin(lat2) - _xp.sin(lat1) * _xp.cos(lat2) * _xp.cos(dlon)
|
||||
bearings = (_xp.degrees(_xp.arctan2(x, y)) + 360) % 360
|
||||
|
||||
# Angle difference from antenna azimuth
|
||||
angle_diff = _xp.abs(bearings - azimuth)
|
||||
angle_diff = _xp.where(angle_diff > 180, 360 - angle_diff, angle_diff)
|
||||
|
||||
# Antenna pattern loss (simplified sector pattern)
|
||||
half_bw = beamwidth / 2
|
||||
in_main = angle_diff <= half_bw
|
||||
loss_main = 3 * (angle_diff / half_bw) ** 2
|
||||
loss_side = 3 + 12 * ((angle_diff - half_bw) / half_bw) ** 2
|
||||
loss_side = _xp.minimum(loss_side, 25.0)
|
||||
|
||||
antenna_loss = _xp.where(in_main, loss_main, loss_side)
|
||||
return _to_cpu(antenna_loss)
|
||||
|
||||
def batch_final_rsrp(
|
||||
self,
|
||||
tx_power: float,
|
||||
tx_gain: float,
|
||||
path_loss: np.ndarray,
|
||||
terrain_loss: np.ndarray,
|
||||
antenna_loss: np.ndarray,
|
||||
building_loss: np.ndarray,
|
||||
vegetation_loss: np.ndarray,
|
||||
rain_loss: np.ndarray,
|
||||
indoor_loss: np.ndarray,
|
||||
atmospheric_loss: np.ndarray,
|
||||
reflection_gain: np.ndarray,
|
||||
fading_margin: float = 0.0,
|
||||
) -> np.ndarray:
|
||||
"""Vectorized final RSRP calculation.
|
||||
|
||||
RSRP = tx_power + tx_gain - path_loss - terrain_loss - antenna_loss
|
||||
- building_loss - vegetation_loss - rain_loss - indoor_loss
|
||||
- atmospheric_loss + reflection_gain - fading_margin
|
||||
|
||||
Returns RSRP in dBm, shape (N,)
|
||||
"""
|
||||
_xp = gpu_manager.get_array_module()
|
||||
|
||||
rsrp = (
|
||||
float(tx_power) + float(tx_gain)
|
||||
- _xp.asarray(path_loss, dtype=_xp.float64)
|
||||
- _xp.asarray(terrain_loss, dtype=_xp.float64)
|
||||
- _xp.asarray(antenna_loss, dtype=_xp.float64)
|
||||
- _xp.asarray(building_loss, dtype=_xp.float64)
|
||||
- _xp.asarray(vegetation_loss, dtype=_xp.float64)
|
||||
- _xp.asarray(rain_loss, dtype=_xp.float64)
|
||||
- _xp.asarray(indoor_loss, dtype=_xp.float64)
|
||||
- _xp.asarray(atmospheric_loss, dtype=_xp.float64)
|
||||
+ _xp.asarray(reflection_gain, dtype=_xp.float64)
|
||||
- float(fading_margin)
|
||||
)
|
||||
|
||||
return _to_cpu(rsrp)
|
||||
|
||||
def calculate_interference(
|
||||
self,
|
||||
rsrp_grids: list,
|
||||
frequencies: list,
|
||||
) -> tuple:
|
||||
"""Calculate C/I (carrier-to-interference) ratio for multi-site scenarios.
|
||||
|
||||
For each grid point:
|
||||
- C = signal strength from strongest (serving) cell
|
||||
- I = sum of signal strengths from all other co-frequency cells
|
||||
- C/I = C(dBm) - 10*log10(sum of linear interference powers)
|
||||
|
||||
Args:
|
||||
rsrp_grids: List of RSRP arrays, one per site, shape (N,) each
|
||||
frequencies: List of frequencies (MHz) for each site
|
||||
|
||||
Returns:
|
||||
(ci_ratio, best_server_idx, best_rsrp)
|
||||
ci_ratio: C/I in dB, shape (N,)
|
||||
best_server_idx: Index of serving cell per point, shape (N,)
|
||||
best_rsrp: RSRP of serving cell per point, shape (N,)
|
||||
"""
|
||||
_xp = gpu_manager.get_array_module()
|
||||
|
||||
if len(rsrp_grids) < 2:
|
||||
# Single site - no interference, return infinity C/I
|
||||
if rsrp_grids:
|
||||
n_points = len(rsrp_grids[0])
|
||||
return (
|
||||
np.full(n_points, 50.0, dtype=np.float64), # 50 dB = effectively no interference
|
||||
np.zeros(n_points, dtype=np.int32),
|
||||
np.array(rsrp_grids[0], dtype=np.float64),
|
||||
)
|
||||
return np.array([]), np.array([]), np.array([])
|
||||
|
||||
# Stack RSRP grids: shape (num_sites, num_points)
|
||||
rsrp_stack = _xp.stack([_xp.asarray(g, dtype=_xp.float64) for g in rsrp_grids], axis=0)
|
||||
num_sites, num_points = rsrp_stack.shape
|
||||
|
||||
# Convert to linear power (mW)
|
||||
rsrp_linear = _xp.power(10.0, rsrp_stack / 10.0)
|
||||
|
||||
# Best server per point
|
||||
best_server_idx = _xp.argmax(rsrp_stack, axis=0)
|
||||
best_rsrp = _xp.take_along_axis(rsrp_stack, best_server_idx[_xp.newaxis, :], axis=0)[0]
|
||||
best_rsrp_linear = _xp.take_along_axis(rsrp_linear, best_server_idx[_xp.newaxis, :], axis=0)[0]
|
||||
|
||||
# Group sites by frequency for co-channel interference
|
||||
freq_array = _xp.asarray(frequencies, dtype=_xp.float64)
|
||||
|
||||
# Calculate interference only from co-frequency sites
|
||||
interference_linear = _xp.zeros(num_points, dtype=_xp.float64)
|
||||
|
||||
for point_idx in range(num_points):
|
||||
serving_site = int(_to_cpu(best_server_idx[point_idx]))
|
||||
serving_freq = frequencies[serving_site]
|
||||
|
||||
# Sum power from all other sites on same frequency
|
||||
for site_idx in range(num_sites):
|
||||
if site_idx != serving_site and frequencies[site_idx] == serving_freq:
|
||||
interference_linear[point_idx] += rsrp_linear[site_idx, point_idx]
|
||||
|
||||
# C/I ratio in dB
|
||||
# Avoid log10(0) with small epsilon
|
||||
epsilon = 1e-30
|
||||
ci_ratio = 10 * _xp.log10(best_rsrp_linear / (interference_linear + epsilon))
|
||||
|
||||
# Clip to reasonable range (-20 to 50 dB)
|
||||
ci_ratio = _xp.clip(ci_ratio, -20, 50)
|
||||
|
||||
return (
|
||||
_to_cpu(ci_ratio),
|
||||
_to_cpu(best_server_idx).astype(np.int32),
|
||||
_to_cpu(best_rsrp),
|
||||
)
|
||||
|
||||
def calculate_interference_vectorized(
|
||||
self,
|
||||
rsrp_grids: list,
|
||||
frequencies: list,
|
||||
) -> tuple:
|
||||
"""Fully vectorized C/I calculation (faster for GPU).
|
||||
|
||||
Same as calculate_interference but avoids Python loops.
|
||||
"""
|
||||
_xp = gpu_manager.get_array_module()
|
||||
|
||||
if len(rsrp_grids) < 2:
|
||||
if rsrp_grids:
|
||||
n_points = len(rsrp_grids[0])
|
||||
return (
|
||||
np.full(n_points, 50.0, dtype=np.float64),
|
||||
np.zeros(n_points, dtype=np.int32),
|
||||
np.array(rsrp_grids[0], dtype=np.float64),
|
||||
)
|
||||
return np.array([]), np.array([]), np.array([])
|
||||
|
||||
# Stack RSRP grids: shape (num_sites, num_points)
|
||||
rsrp_stack = _xp.stack([_xp.asarray(g, dtype=_xp.float64) for g in rsrp_grids], axis=0)
|
||||
num_sites, num_points = rsrp_stack.shape
|
||||
|
||||
# Convert to linear power (mW)
|
||||
rsrp_linear = _xp.power(10.0, rsrp_stack / 10.0)
|
||||
|
||||
# Best server per point
|
||||
best_server_idx = _xp.argmax(rsrp_stack, axis=0)
|
||||
best_rsrp = _xp.take_along_axis(rsrp_stack, best_server_idx[_xp.newaxis, :], axis=0)[0]
|
||||
best_rsrp_linear = _xp.take_along_axis(rsrp_linear, best_server_idx[_xp.newaxis, :], axis=0)[0]
|
||||
|
||||
# Create frequency match matrix: (num_sites, num_sites)
|
||||
freq_array = _xp.asarray(frequencies, dtype=_xp.float64)
|
||||
freq_match = freq_array[:, _xp.newaxis] == freq_array[_xp.newaxis, :]
|
||||
|
||||
# Total power from all sites
|
||||
total_power = _xp.sum(rsrp_linear, axis=0)
|
||||
|
||||
# For simplified calculation (all sites same frequency):
|
||||
# Interference = total - serving
|
||||
interference_linear = total_power - best_rsrp_linear
|
||||
|
||||
# C/I ratio in dB
|
||||
epsilon = 1e-30
|
||||
ci_ratio = 10 * _xp.log10(best_rsrp_linear / (interference_linear + epsilon))
|
||||
|
||||
# Clip to reasonable range
|
||||
ci_ratio = _xp.clip(ci_ratio, -20, 50)
|
||||
|
||||
return (
|
||||
_to_cpu(ci_ratio),
|
||||
_to_cpu(best_server_idx).astype(np.int32),
|
||||
_to_cpu(best_rsrp),
|
||||
)
|
||||
|
||||
|
||||
# Singleton
|
||||
gpu_service = GPUService()
|
||||
|
||||
167
backend/app/services/osm_client.py
Normal file
167
backend/app/services/osm_client.py
Normal file
@@ -0,0 +1,167 @@
|
||||
"""
|
||||
Dedicated OpenStreetMap Overpass API client.
|
||||
|
||||
Handles:
|
||||
- Building footprint queries
|
||||
- Vegetation area queries
|
||||
- Water body queries
|
||||
- Response parsing and error handling
|
||||
- Rate limiting (Overpass requires courtesy)
|
||||
"""
|
||||
|
||||
import time
|
||||
import asyncio
|
||||
from typing import List, Optional, Dict, Any
|
||||
|
||||
import httpx
|
||||
|
||||
|
||||
# Overpass API endpoints (primary + mirror)
|
||||
OVERPASS_ENDPOINTS = [
|
||||
"https://overpass-api.de/api/interpreter",
|
||||
"https://overpass.kumi.systems/api/interpreter",
|
||||
]
|
||||
|
||||
# Minimum seconds between requests to same endpoint
|
||||
RATE_LIMIT_SECONDS = 1.0
|
||||
|
||||
|
||||
class OSMClient:
|
||||
"""
|
||||
OpenStreetMap Overpass API client with rate limiting
|
||||
and automatic failover between endpoints.
|
||||
"""
|
||||
|
||||
def __init__(self, timeout: float = 60.0):
|
||||
self.timeout = timeout
|
||||
self._last_request_time: float = 0
|
||||
self._current_endpoint = 0
|
||||
|
||||
async def _rate_limit(self):
|
||||
"""Enforce rate limiting between requests."""
|
||||
elapsed = time.monotonic() - self._last_request_time
|
||||
if elapsed < RATE_LIMIT_SECONDS:
|
||||
await asyncio.sleep(RATE_LIMIT_SECONDS - elapsed)
|
||||
self._last_request_time = time.monotonic()
|
||||
|
||||
async def query(self, overpass_ql: str) -> Optional[Dict[str, Any]]:
|
||||
"""
|
||||
Execute an Overpass QL query with automatic failover.
|
||||
|
||||
Returns parsed JSON response or None on failure.
|
||||
"""
|
||||
await self._rate_limit()
|
||||
|
||||
for i in range(len(OVERPASS_ENDPOINTS)):
|
||||
idx = (self._current_endpoint + i) % len(OVERPASS_ENDPOINTS)
|
||||
endpoint = OVERPASS_ENDPOINTS[idx]
|
||||
|
||||
try:
|
||||
async with httpx.AsyncClient(timeout=self.timeout) as client:
|
||||
response = await client.post(
|
||||
endpoint,
|
||||
data={"data": overpass_ql},
|
||||
)
|
||||
|
||||
if response.status_code == 429:
|
||||
# Rate limited — try next endpoint
|
||||
print(f"[OSM] Rate limited by {endpoint}, trying next...")
|
||||
continue
|
||||
|
||||
response.raise_for_status()
|
||||
self._current_endpoint = idx
|
||||
return response.json()
|
||||
|
||||
except httpx.TimeoutException:
|
||||
print(f"[OSM] Timeout from {endpoint}")
|
||||
continue
|
||||
except httpx.HTTPStatusError as e:
|
||||
print(f"[OSM] HTTP error from {endpoint}: {e.response.status_code}")
|
||||
continue
|
||||
except Exception as e:
|
||||
print(f"[OSM] Error from {endpoint}: {e}")
|
||||
continue
|
||||
|
||||
print("[OSM] All endpoints failed")
|
||||
return None
|
||||
|
||||
async def fetch_buildings(
|
||||
self,
|
||||
min_lat: float, min_lon: float,
|
||||
max_lat: float, max_lon: float,
|
||||
) -> List[Dict[str, Any]]:
|
||||
"""
|
||||
Fetch building footprints in a bounding box.
|
||||
|
||||
Returns list of raw OSM elements (ways and relations).
|
||||
"""
|
||||
query = f"""
|
||||
[out:json][timeout:30];
|
||||
(
|
||||
way["building"]({min_lat},{min_lon},{max_lat},{max_lon});
|
||||
relation["building"]({min_lat},{min_lon},{max_lat},{max_lon});
|
||||
);
|
||||
out body;
|
||||
>;
|
||||
out skel qt;
|
||||
"""
|
||||
|
||||
data = await self.query(query)
|
||||
if data is None:
|
||||
return []
|
||||
|
||||
return data.get("elements", [])
|
||||
|
||||
async def fetch_vegetation(
|
||||
self,
|
||||
min_lat: float, min_lon: float,
|
||||
max_lat: float, max_lon: float,
|
||||
) -> List[Dict[str, Any]]:
|
||||
"""Fetch vegetation areas (forests, parks, etc.)."""
|
||||
query = f"""
|
||||
[out:json][timeout:30];
|
||||
(
|
||||
way["natural"="wood"]({min_lat},{min_lon},{max_lat},{max_lon});
|
||||
way["landuse"="forest"]({min_lat},{min_lon},{max_lat},{max_lon});
|
||||
way["natural"="tree_row"]({min_lat},{min_lon},{max_lat},{max_lon});
|
||||
relation["natural"="wood"]({min_lat},{min_lon},{max_lat},{max_lon});
|
||||
relation["landuse"="forest"]({min_lat},{min_lon},{max_lat},{max_lon});
|
||||
);
|
||||
out body;
|
||||
>;
|
||||
out skel qt;
|
||||
"""
|
||||
|
||||
data = await self.query(query)
|
||||
if data is None:
|
||||
return []
|
||||
|
||||
return data.get("elements", [])
|
||||
|
||||
async def fetch_water(
|
||||
self,
|
||||
min_lat: float, min_lon: float,
|
||||
max_lat: float, max_lon: float,
|
||||
) -> List[Dict[str, Any]]:
|
||||
"""Fetch water bodies (rivers, lakes, etc.)."""
|
||||
query = f"""
|
||||
[out:json][timeout:30];
|
||||
(
|
||||
way["natural"="water"]({min_lat},{min_lon},{max_lat},{max_lon});
|
||||
way["waterway"]({min_lat},{min_lon},{max_lat},{max_lon});
|
||||
relation["natural"="water"]({min_lat},{min_lon},{max_lat},{max_lon});
|
||||
);
|
||||
out body;
|
||||
>;
|
||||
out skel qt;
|
||||
"""
|
||||
|
||||
data = await self.query(query)
|
||||
if data is None:
|
||||
return []
|
||||
|
||||
return data.get("elements", [])
|
||||
|
||||
|
||||
# Singleton
|
||||
osm_client = OSMClient()
|
||||
@@ -21,6 +21,7 @@ Usage:
|
||||
)
|
||||
"""
|
||||
|
||||
import gc
|
||||
import os
|
||||
import sys
|
||||
import subprocess
|
||||
@@ -47,6 +48,24 @@ class CancellationToken:
|
||||
return self._event.is_set()
|
||||
|
||||
|
||||
# ── Active pool tracking (for graceful shutdown) ──
|
||||
|
||||
_active_pool = None # Global ref to current ProcessPoolExecutor
|
||||
_active_pool_lock = threading.Lock()
|
||||
|
||||
|
||||
def _set_active_pool(pool):
|
||||
global _active_pool
|
||||
with _active_pool_lock:
|
||||
_active_pool = pool
|
||||
|
||||
|
||||
def _clear_active_pool():
|
||||
global _active_pool
|
||||
with _active_pool_lock:
|
||||
_active_pool = None
|
||||
|
||||
|
||||
# ── Worker process cleanup ──
|
||||
|
||||
def _clog(msg: str):
|
||||
@@ -57,10 +76,23 @@ def _clog(msg: str):
|
||||
def _kill_worker_processes() -> int:
|
||||
"""Kill ALL rfcp-server processes except the current (main) process.
|
||||
|
||||
Uses process NAME matching instead of PID tree because psutil.children()
|
||||
cannot see grandchildren spawned by ProcessPoolExecutor workers.
|
||||
First shuts down the active ProcessPoolExecutor (if any), then uses
|
||||
process NAME matching to kill remaining workers.
|
||||
Returns the number of processes killed.
|
||||
"""
|
||||
global _active_pool
|
||||
|
||||
# Step 0: Shut down active ProcessPoolExecutor gracefully
|
||||
with _active_pool_lock:
|
||||
pool = _active_pool
|
||||
_active_pool = None
|
||||
if pool is not None:
|
||||
try:
|
||||
pool.shutdown(wait=False, cancel_futures=True)
|
||||
_clog("Active ProcessPoolExecutor shutdown requested")
|
||||
except Exception as e:
|
||||
_clog(f"Pool shutdown error: {e}")
|
||||
|
||||
my_pid = os.getpid()
|
||||
killed_count = 0
|
||||
|
||||
@@ -132,11 +164,16 @@ except ImportError:
|
||||
ray = None # type: ignore
|
||||
|
||||
|
||||
# ── Worker-level spatial index cache (persists across tasks in same worker) ──
|
||||
# ── Worker-level caches (persist across tasks in same worker process) ──
|
||||
|
||||
_worker_spatial_idx = None
|
||||
_worker_cache_key: Optional[str] = None
|
||||
|
||||
# Shared-memory buildings/OSM — unpickled once per worker, cached by key
|
||||
_worker_shared_buildings = None
|
||||
_worker_shared_osm_data = None
|
||||
_worker_shared_data_key: Optional[str] = None
|
||||
|
||||
|
||||
def _ray_process_chunk_impl(chunk, terrain_cache, buildings, osm_data, config):
|
||||
"""Implementation: process a chunk of (lat, lon, elevation) tuples.
|
||||
@@ -154,10 +191,12 @@ def _ray_process_chunk_impl(chunk, terrain_cache, buildings, osm_data, config):
|
||||
# Build or reuse spatial index (expensive — ~1s for 350K buildings).
|
||||
cache_key = config.get('cache_key', '')
|
||||
if _worker_cache_key != cache_key:
|
||||
from app.services.spatial_index import SpatialIndex
|
||||
_worker_spatial_idx = SpatialIndex()
|
||||
if buildings:
|
||||
from app.services.spatial_index import SpatialIndex
|
||||
_worker_spatial_idx = SpatialIndex()
|
||||
_worker_spatial_idx.build(buildings)
|
||||
else:
|
||||
_worker_spatial_idx = None
|
||||
_worker_cache_key = cache_key
|
||||
|
||||
# Process points
|
||||
@@ -171,6 +210,7 @@ def _ray_process_chunk_impl(chunk, terrain_cache, buildings, osm_data, config):
|
||||
"los": 0.0, "buildings": 0.0, "antenna": 0.0,
|
||||
"dominant_path": 0.0, "street_canyon": 0.0,
|
||||
"reflection": 0.0, "vegetation": 0.0,
|
||||
"lod_none": 0, "lod_simplified": 0, "lod_full": 0,
|
||||
}
|
||||
|
||||
precomputed = config.get('precomputed')
|
||||
@@ -186,6 +226,9 @@ def _ray_process_chunk_impl(chunk, terrain_cache, buildings, osm_data, config):
|
||||
config['site_elevation'], point_elev, timing,
|
||||
precomputed_distance=pre.get('distance') if pre else None,
|
||||
precomputed_path_loss=pre.get('path_loss') if pre else None,
|
||||
precomputed_has_los=pre.get('has_los') if pre else None,
|
||||
precomputed_terrain_loss=pre.get('terrain_loss') if pre else None,
|
||||
precomputed_antenna_loss=pre.get('antenna_loss') if pre else None,
|
||||
)
|
||||
if point.rsrp >= settings.min_signal:
|
||||
results.append(point.model_dump())
|
||||
@@ -204,9 +247,14 @@ if RAY_AVAILABLE:
|
||||
|
||||
|
||||
def get_cpu_count() -> int:
|
||||
"""Get number of usable CPU cores, capped at 14."""
|
||||
"""Get number of usable CPU cores, capped at 6.
|
||||
|
||||
Each worker holds its own copy of buildings + OSM data + spatial index
|
||||
(~200-400 MB per worker). Capping at 6 prevents OOM on systems with
|
||||
8-16 GB RAM (especially WSL2 with limited memory allocation).
|
||||
"""
|
||||
try:
|
||||
return min(mp.cpu_count() or 4, 14)
|
||||
return min(mp.cpu_count() or 4, 6)
|
||||
except Exception:
|
||||
return 4
|
||||
|
||||
@@ -262,6 +310,7 @@ def calculate_coverage_parallel(
|
||||
log_fn: Optional[Callable[[str], None]] = None,
|
||||
cancel_token: Optional[CancellationToken] = None,
|
||||
precomputed: Optional[Dict] = None,
|
||||
progress_fn: Optional[Callable[[str, float], None]] = None,
|
||||
) -> Tuple[List[Dict], Dict[str, float]]:
|
||||
"""Calculate coverage points in parallel.
|
||||
|
||||
@@ -287,12 +336,30 @@ def calculate_coverage_parallel(
|
||||
terrain_cache, buildings, streets, water_bodies,
|
||||
vegetation_areas, site_elevation,
|
||||
num_workers, log_fn, cancel_token, precomputed,
|
||||
progress_fn,
|
||||
)
|
||||
except Exception as e:
|
||||
log_fn(f"Ray execution failed: {e} — falling back to sequential")
|
||||
|
||||
# Fallback: ProcessPoolExecutor with reduced workers to avoid MemoryError
|
||||
pool_workers = min(num_workers, 6)
|
||||
# Fallback: ProcessPoolExecutor (shared memory eliminates per-chunk pickle)
|
||||
pool_workers = num_workers
|
||||
|
||||
# Scale workers down based on data volume to prevent OOM.
|
||||
# Each worker unpickles + holds its own copy of buildings, OSM data, and
|
||||
# spatial index. With large datasets the per-worker memory can exceed
|
||||
# 300 MB, so reduce workers to keep total under ~2 GB.
|
||||
data_items = len(buildings) + len(streets) + len(water_bodies) + len(vegetation_areas)
|
||||
if data_items > 20000:
|
||||
pool_workers = min(pool_workers, 2)
|
||||
log_fn(f"Data volume high ({data_items} items) — capping workers at {pool_workers}")
|
||||
elif data_items > 10000:
|
||||
pool_workers = min(pool_workers, 3)
|
||||
log_fn(f"Data volume moderate ({data_items} items) — capping workers at {pool_workers}")
|
||||
elif data_items > 5000:
|
||||
pool_workers = min(pool_workers, 4)
|
||||
log_fn(f"Data volume elevated ({data_items} items) — capping workers at {pool_workers}")
|
||||
|
||||
log_fn(f"ProcessPool: {pool_workers} workers (cpu_count={num_workers}, data_items={data_items})")
|
||||
if pool_workers > 1 and total_points > 100:
|
||||
try:
|
||||
return _calculate_with_process_pool(
|
||||
@@ -300,7 +367,10 @@ def calculate_coverage_parallel(
|
||||
terrain_cache, buildings, streets, water_bodies,
|
||||
vegetation_areas, site_elevation,
|
||||
pool_workers, log_fn, cancel_token, precomputed,
|
||||
progress_fn,
|
||||
)
|
||||
except (MemoryError, OSError) as e:
|
||||
log_fn(f"ProcessPool OOM/OS error: {e} — falling back to sequential")
|
||||
except Exception as e:
|
||||
log_fn(f"ProcessPool failed: {e} — falling back to sequential")
|
||||
|
||||
@@ -310,6 +380,7 @@ def calculate_coverage_parallel(
|
||||
grid, point_elevations, site_dict, settings_dict,
|
||||
buildings, streets, water_bodies, vegetation_areas,
|
||||
site_elevation, log_fn, cancel_token, precomputed,
|
||||
progress_fn,
|
||||
)
|
||||
|
||||
|
||||
@@ -321,6 +392,7 @@ def _calculate_with_ray(
|
||||
terrain_cache, buildings, streets, water_bodies,
|
||||
vegetation_areas, site_elevation,
|
||||
num_workers, log_fn, cancel_token=None, precomputed=None,
|
||||
progress_fn=None,
|
||||
):
|
||||
"""Execute using Ray shared-memory object store."""
|
||||
total_points = len(grid)
|
||||
@@ -357,8 +429,8 @@ def _calculate_with_ray(
|
||||
for lat, lon in grid
|
||||
]
|
||||
|
||||
# ~4 chunks per worker for granular progress
|
||||
chunk_size = max(1, len(items) // (num_workers * 4))
|
||||
# Larger chunks to amortize IPC overhead (was num_workers*4)
|
||||
chunk_size = max(1, min(400, len(items) // max(2, num_workers)))
|
||||
chunks = [items[i:i + chunk_size] for i in range(0, len(items), chunk_size)]
|
||||
log_fn(f"Submitting {len(chunks)} chunks of ~{chunk_size} points")
|
||||
|
||||
@@ -404,11 +476,17 @@ def _calculate_with_ray(
|
||||
eta = (total_points - pts) / rate if rate > 0 else 0
|
||||
log_fn(f"Progress: {completed_chunks}/{total_chunks} chunks ({pct}%) — "
|
||||
f"{pts} pts, {rate:.0f} pts/s, ETA {eta:.0f}s")
|
||||
if progress_fn:
|
||||
# Map chunk progress to 40%-95% range
|
||||
progress_fn("Calculating coverage", 0.40 + 0.55 * (completed_chunks / total_chunks))
|
||||
|
||||
calc_time = time.time() - t_calc
|
||||
log_fn(f"Ray done: {calc_time:.1f}s, {len(all_results)} results "
|
||||
f"({calc_time / max(1, total_points) * 1000:.1f}ms/point)")
|
||||
|
||||
# Force garbage collection after Ray computation
|
||||
gc.collect()
|
||||
|
||||
timing = {
|
||||
"parallel_total": calc_time,
|
||||
"ray_put": put_time,
|
||||
@@ -428,9 +506,10 @@ def _pool_worker_process_chunk(args):
|
||||
from app.services.terrain_service import terrain_service
|
||||
terrain_service._tile_cache = terrain_cache
|
||||
|
||||
from app.services.spatial_index import SpatialIndex
|
||||
spatial_idx = SpatialIndex()
|
||||
spatial_idx = None
|
||||
if buildings:
|
||||
from app.services.spatial_index import SpatialIndex
|
||||
spatial_idx = SpatialIndex()
|
||||
spatial_idx.build(buildings)
|
||||
|
||||
from app.services.coverage_service import CoverageService, SiteParams, CoverageSettings
|
||||
@@ -443,6 +522,7 @@ def _pool_worker_process_chunk(args):
|
||||
"los": 0.0, "buildings": 0.0, "antenna": 0.0,
|
||||
"dominant_path": 0.0, "street_canyon": 0.0,
|
||||
"reflection": 0.0, "vegetation": 0.0,
|
||||
"lod_none": 0, "lod_simplified": 0, "lod_full": 0,
|
||||
}
|
||||
|
||||
precomputed = config.get('precomputed')
|
||||
@@ -458,6 +538,9 @@ def _pool_worker_process_chunk(args):
|
||||
config['site_elevation'], point_elev, timing,
|
||||
precomputed_distance=pre.get('distance') if pre else None,
|
||||
precomputed_path_loss=pre.get('path_loss') if pre else None,
|
||||
precomputed_has_los=pre.get('has_los') if pre else None,
|
||||
precomputed_terrain_loss=pre.get('terrain_loss') if pre else None,
|
||||
precomputed_antenna_loss=pre.get('antenna_loss') if pre else None,
|
||||
)
|
||||
if point.rsrp >= settings.min_signal:
|
||||
results.append(point.model_dump())
|
||||
@@ -465,32 +548,409 @@ def _pool_worker_process_chunk(args):
|
||||
return results
|
||||
|
||||
|
||||
def _store_terrain_in_shm(terrain_cache: Dict[str, np.ndarray], log_fn) -> Tuple[list, Dict[str, dict]]:
|
||||
"""Store terrain tile arrays in shared memory. Returns (shm_blocks, tile_refs).
|
||||
|
||||
tile_refs is a dict mapping tile_name -> {shm_name, shape, dtype_str}
|
||||
that workers use to reconstruct numpy arrays from shared memory.
|
||||
"""
|
||||
import multiprocessing.shared_memory as shm_mod
|
||||
|
||||
blocks = []
|
||||
refs = {}
|
||||
|
||||
for tile_name, arr in terrain_cache.items():
|
||||
try:
|
||||
block = shm_mod.SharedMemory(create=True, size=arr.nbytes)
|
||||
blocks.append(block)
|
||||
# Copy tile data to shared memory
|
||||
shm_arr = np.ndarray(arr.shape, dtype=arr.dtype, buffer=block.buf)
|
||||
shm_arr[:] = arr[:]
|
||||
refs[tile_name] = {
|
||||
'shm_name': block.name,
|
||||
'shape': arr.shape,
|
||||
'dtype': str(arr.dtype),
|
||||
}
|
||||
except Exception as e:
|
||||
log_fn(f"Failed to store tile {tile_name} in shm: {e}")
|
||||
# Fallback: worker will have to use pickled copy
|
||||
pass
|
||||
|
||||
return blocks, refs
|
||||
|
||||
|
||||
def _store_pickle_in_shm(data, label: str, log_fn) -> Tuple[Optional[Any], Optional[dict]]:
|
||||
"""Pickle arbitrary data into a SharedMemory block.
|
||||
|
||||
Returns (shm_block, ref_dict) where ref_dict = {shm_name, size}.
|
||||
On failure returns (None, None) and caller should fall back to pickle.
|
||||
"""
|
||||
import multiprocessing.shared_memory as shm_mod
|
||||
import pickle
|
||||
|
||||
try:
|
||||
blob = pickle.dumps(data, protocol=pickle.HIGHEST_PROTOCOL)
|
||||
size = len(blob)
|
||||
block = shm_mod.SharedMemory(create=True, size=size)
|
||||
block.buf[:size] = blob
|
||||
mb = size / (1024 * 1024)
|
||||
log_fn(f"{label} in shared memory: {mb:.1f} MB")
|
||||
return block, {'shm_name': block.name, 'size': size}
|
||||
except Exception as e:
|
||||
log_fn(f"Failed to store {label} in shm: {e}")
|
||||
return None, None
|
||||
|
||||
|
||||
def _pool_worker_shm_chunk(args):
|
||||
"""Worker function that reads terrain from shared memory instead of pickle."""
|
||||
import multiprocessing.shared_memory as shm_mod
|
||||
|
||||
chunk, terrain_shm_refs, buildings, osm_data, config = args
|
||||
|
||||
# Reconstruct terrain cache from shared memory (zero-copy numpy views)
|
||||
terrain_cache = {}
|
||||
for tile_name, ref in terrain_shm_refs.items():
|
||||
try:
|
||||
block = shm_mod.SharedMemory(name=ref['shm_name'])
|
||||
terrain_cache[tile_name] = np.ndarray(
|
||||
ref['shape'], dtype=ref['dtype'], buffer=block.buf,
|
||||
)
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
# Inject terrain cache
|
||||
from app.services.terrain_service import terrain_service
|
||||
terrain_service._tile_cache = terrain_cache
|
||||
|
||||
# Build spatial index
|
||||
global _worker_spatial_idx, _worker_cache_key
|
||||
cache_key = config.get('cache_key', '')
|
||||
if _worker_cache_key != cache_key:
|
||||
if buildings:
|
||||
from app.services.spatial_index import SpatialIndex
|
||||
_worker_spatial_idx = SpatialIndex()
|
||||
_worker_spatial_idx.build(buildings)
|
||||
else:
|
||||
_worker_spatial_idx = None
|
||||
_worker_cache_key = cache_key
|
||||
|
||||
# Process points
|
||||
from app.services.coverage_service import CoverageService, SiteParams, CoverageSettings
|
||||
site = SiteParams(**config['site_dict'])
|
||||
settings = CoverageSettings(**config['settings_dict'])
|
||||
svc = CoverageService()
|
||||
|
||||
timing = {
|
||||
"los": 0.0, "buildings": 0.0, "antenna": 0.0,
|
||||
"dominant_path": 0.0, "street_canyon": 0.0,
|
||||
"reflection": 0.0, "vegetation": 0.0,
|
||||
"lod_none": 0, "lod_simplified": 0, "lod_full": 0,
|
||||
}
|
||||
|
||||
precomputed = config.get('precomputed')
|
||||
|
||||
results = []
|
||||
for lat, lon, point_elev in chunk:
|
||||
pre = precomputed.get((lat, lon)) if precomputed else None
|
||||
point = svc._calculate_point_sync(
|
||||
site, lat, lon, settings,
|
||||
buildings, osm_data.get('streets', []),
|
||||
_worker_spatial_idx, osm_data.get('water_bodies', []),
|
||||
osm_data.get('vegetation_areas', []),
|
||||
config['site_elevation'], point_elev, timing,
|
||||
precomputed_distance=pre.get('distance') if pre else None,
|
||||
precomputed_path_loss=pre.get('path_loss') if pre else None,
|
||||
precomputed_has_los=pre.get('has_los') if pre else None,
|
||||
precomputed_terrain_loss=pre.get('terrain_loss') if pre else None,
|
||||
precomputed_antenna_loss=pre.get('antenna_loss') if pre else None,
|
||||
)
|
||||
if point.rsrp >= settings.min_signal:
|
||||
results.append(point.model_dump())
|
||||
|
||||
return results
|
||||
|
||||
|
||||
_worker_chunk_count: int = 0 # per-worker chunk counter
|
||||
|
||||
|
||||
def _pool_worker_shm_shared(args):
|
||||
"""Worker: terrain + buildings + OSM all via shared memory.
|
||||
|
||||
Per-chunk args are tiny (~8 KB): just point coords, shm refs, and config.
|
||||
Buildings and OSM data are unpickled from shared memory ONCE per worker
|
||||
and cached in module globals for subsequent chunks.
|
||||
"""
|
||||
import multiprocessing.shared_memory as shm_mod
|
||||
import pickle
|
||||
|
||||
global _worker_chunk_count
|
||||
_worker_chunk_count += 1
|
||||
pid = os.getpid()
|
||||
t_worker_start = time.perf_counter()
|
||||
|
||||
chunk, terrain_shm_refs, shared_data_refs, config = args
|
||||
|
||||
# ── Reconstruct terrain from shared memory ──
|
||||
t0 = time.perf_counter()
|
||||
terrain_cache = {}
|
||||
for tile_name, ref in terrain_shm_refs.items():
|
||||
try:
|
||||
block = shm_mod.SharedMemory(name=ref['shm_name'])
|
||||
terrain_cache[tile_name] = np.ndarray(
|
||||
ref['shape'], dtype=ref['dtype'], buffer=block.buf,
|
||||
)
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
from app.services.terrain_service import terrain_service
|
||||
terrain_service._tile_cache = terrain_cache
|
||||
t_terrain_shm = time.perf_counter() - t0
|
||||
|
||||
# ── Read buildings + OSM from shared memory (cached per worker) ──
|
||||
global _worker_shared_buildings, _worker_shared_osm_data, _worker_shared_data_key
|
||||
global _worker_spatial_idx, _worker_cache_key
|
||||
|
||||
data_key = config.get('cache_key', '')
|
||||
cached = (_worker_shared_data_key == data_key)
|
||||
|
||||
t_unpickle_bld = 0.0
|
||||
t_unpickle_osm = 0.0
|
||||
t_spatial = 0.0
|
||||
|
||||
if not cached:
|
||||
# First chunk for this calculation — unpickle from shm
|
||||
buildings_ref = shared_data_refs.get('buildings')
|
||||
osm_ref = shared_data_refs.get('osm_data')
|
||||
|
||||
if buildings_ref:
|
||||
try:
|
||||
t0 = time.perf_counter()
|
||||
blk = shm_mod.SharedMemory(name=buildings_ref['shm_name'])
|
||||
_worker_shared_buildings = pickle.loads(bytes(blk.buf[:buildings_ref['size']]))
|
||||
t_unpickle_bld = time.perf_counter() - t0
|
||||
except Exception:
|
||||
_worker_shared_buildings = []
|
||||
else:
|
||||
_worker_shared_buildings = []
|
||||
|
||||
if osm_ref:
|
||||
try:
|
||||
t0 = time.perf_counter()
|
||||
blk = shm_mod.SharedMemory(name=osm_ref['shm_name'])
|
||||
_worker_shared_osm_data = pickle.loads(bytes(blk.buf[:osm_ref['size']]))
|
||||
t_unpickle_osm = time.perf_counter() - t0
|
||||
except Exception:
|
||||
_worker_shared_osm_data = {}
|
||||
else:
|
||||
_worker_shared_osm_data = {}
|
||||
|
||||
_worker_shared_data_key = data_key
|
||||
|
||||
# Rebuild spatial index for new data
|
||||
t0 = time.perf_counter()
|
||||
if _worker_shared_buildings:
|
||||
from app.services.spatial_index import SpatialIndex
|
||||
_worker_spatial_idx = SpatialIndex()
|
||||
_worker_spatial_idx.build(_worker_shared_buildings)
|
||||
else:
|
||||
_worker_spatial_idx = None
|
||||
_worker_cache_key = data_key
|
||||
t_spatial = time.perf_counter() - t0
|
||||
|
||||
print(
|
||||
f"[WORKER {pid}] Init: terrain_shm={t_terrain_shm*1000:.1f}ms "
|
||||
f"unpickle_bld={t_unpickle_bld*1000:.1f}ms "
|
||||
f"unpickle_osm={t_unpickle_osm*1000:.1f}ms "
|
||||
f"spatial={t_spatial*1000:.1f}ms "
|
||||
f"buildings={len(_worker_shared_buildings or [])} "
|
||||
f"tiles={len(terrain_cache)}",
|
||||
flush=True,
|
||||
)
|
||||
|
||||
print(
|
||||
f"[WORKER {pid}] Processing chunk {_worker_chunk_count}, "
|
||||
f"cached={cached}, points={len(chunk)}",
|
||||
flush=True,
|
||||
)
|
||||
|
||||
buildings = _worker_shared_buildings or []
|
||||
osm_data = _worker_shared_osm_data or {}
|
||||
|
||||
# ── Imports + object creation (timed) ──
|
||||
t0 = time.perf_counter()
|
||||
from app.services.coverage_service import CoverageService, SiteParams, CoverageSettings
|
||||
t_import = time.perf_counter() - t0
|
||||
|
||||
t0 = time.perf_counter()
|
||||
site = SiteParams(**config['site_dict'])
|
||||
settings = CoverageSettings(**config['settings_dict'])
|
||||
svc = CoverageService()
|
||||
t_pydantic = time.perf_counter() - t0
|
||||
|
||||
timing = {
|
||||
"los": 0.0, "buildings": 0.0, "antenna": 0.0,
|
||||
"dominant_path": 0.0, "street_canyon": 0.0,
|
||||
"reflection": 0.0, "vegetation": 0.0,
|
||||
"lod_none": 0, "lod_simplified": 0, "lod_full": 0,
|
||||
}
|
||||
|
||||
precomputed = config.get('precomputed')
|
||||
|
||||
streets = osm_data.get('streets', [])
|
||||
water = osm_data.get('water_bodies', [])
|
||||
veg = osm_data.get('vegetation_areas', [])
|
||||
site_elev = config['site_elevation']
|
||||
|
||||
t_init_done = time.perf_counter()
|
||||
init_ms = (t_init_done - t_worker_start) * 1000
|
||||
|
||||
# ── Process points with per-point profiling (first 3 only) ──
|
||||
results = []
|
||||
t_loop_start = time.perf_counter()
|
||||
t_model_dump_total = 0.0
|
||||
n_dumped = 0
|
||||
|
||||
for i, (lat, lon, point_elev) in enumerate(chunk):
|
||||
pre = precomputed.get((lat, lon)) if precomputed else None
|
||||
|
||||
# Snapshot timing dict before call (for first 3 points)
|
||||
if i < 3:
|
||||
timing_before = {k: v for k, v in timing.items()}
|
||||
t_pt = time.perf_counter()
|
||||
|
||||
point = svc._calculate_point_sync(
|
||||
site, lat, lon, settings,
|
||||
buildings, streets,
|
||||
_worker_spatial_idx, water, veg,
|
||||
site_elev, point_elev, timing,
|
||||
precomputed_distance=pre.get('distance') if pre else None,
|
||||
precomputed_path_loss=pre.get('path_loss') if pre else None,
|
||||
precomputed_has_los=pre.get('has_los') if pre else None,
|
||||
precomputed_terrain_loss=pre.get('terrain_loss') if pre else None,
|
||||
precomputed_antenna_loss=pre.get('antenna_loss') if pre else None,
|
||||
)
|
||||
|
||||
if i < 3:
|
||||
t_pt_done = time.perf_counter()
|
||||
pt_ms = (t_pt_done - t_pt) * 1000
|
||||
deltas = {k: (timing[k] - timing_before.get(k, 0)) * 1000 for k in timing}
|
||||
parts = " ".join(f"{k}={v:.2f}" for k, v in deltas.items() if v > 0.001)
|
||||
print(
|
||||
f"[WORKER {pid}] Point {i}: {pt_ms:.2f}ms "
|
||||
f"rsrp={point.rsrp:.1f} dist={point.distance:.0f}m "
|
||||
f"breakdown=[{parts}]",
|
||||
flush=True,
|
||||
)
|
||||
|
||||
if point.rsrp >= settings.min_signal:
|
||||
t_md = time.perf_counter()
|
||||
results.append(point.model_dump())
|
||||
t_model_dump_total += time.perf_counter() - t_md
|
||||
n_dumped += 1
|
||||
|
||||
t_loop_done = time.perf_counter()
|
||||
loop_ms = (t_loop_done - t_loop_start) * 1000
|
||||
total_ms = (t_loop_done - t_worker_start) * 1000
|
||||
avg_pt = loop_ms / len(chunk) if chunk else 0
|
||||
avg_dump = (t_model_dump_total * 1000 / n_dumped) if n_dumped else 0
|
||||
|
||||
print(
|
||||
f"[WORKER {pid}] Chunk done: total={total_ms:.0f}ms "
|
||||
f"init={init_ms:.0f}ms loop={loop_ms:.0f}ms "
|
||||
f"avg_pt={avg_pt:.2f}ms model_dump={avg_dump:.2f}ms×{n_dumped} "
|
||||
f"import={t_import*1000:.1f}ms pydantic={t_pydantic*1000:.1f}ms "
|
||||
f"terrain_shm={t_terrain_shm*1000:.1f}ms "
|
||||
f"results={len(results)}/{len(chunk)}",
|
||||
flush=True,
|
||||
)
|
||||
|
||||
return results
|
||||
|
||||
|
||||
def _calculate_with_process_pool(
|
||||
grid, point_elevations, site_dict, settings_dict,
|
||||
terrain_cache, buildings, streets, water_bodies,
|
||||
vegetation_areas, site_elevation,
|
||||
num_workers, log_fn, cancel_token=None, precomputed=None,
|
||||
progress_fn=None,
|
||||
):
|
||||
"""Execute using ProcessPoolExecutor with reduced workers to limit memory."""
|
||||
"""Execute using ProcessPoolExecutor.
|
||||
|
||||
Uses shared memory for terrain tiles (zero-copy numpy views), buildings,
|
||||
and OSM data (pickle-once, read-many) to eliminate per-chunk serialization
|
||||
overhead.
|
||||
"""
|
||||
from concurrent.futures import ProcessPoolExecutor, as_completed
|
||||
|
||||
total_points = len(grid)
|
||||
log_fn(f"ProcessPool mode: {total_points} points, {num_workers} workers")
|
||||
building_count = len(buildings)
|
||||
data_items = building_count + len(streets) + len(water_bodies) + len(vegetation_areas)
|
||||
|
||||
log_fn(f"ProcessPool mode: {total_points} points, {num_workers} workers, "
|
||||
f"{building_count} buildings, {data_items} total OSM items")
|
||||
|
||||
# Log memory at start
|
||||
try:
|
||||
with open('/proc/self/status') as f:
|
||||
for line in f:
|
||||
if line.startswith('VmRSS:'):
|
||||
log_fn(f"Memory before calculation: {line.strip()}")
|
||||
break
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
# Store terrain tiles in shared memory
|
||||
shm_blocks = []
|
||||
terrain_shm_refs = {}
|
||||
try:
|
||||
shm_blocks, terrain_shm_refs = _store_terrain_in_shm(terrain_cache, log_fn)
|
||||
if terrain_shm_refs:
|
||||
tile_mb = sum(
|
||||
np.prod(r['shape']) * np.dtype(r['dtype']).itemsize
|
||||
for r in terrain_shm_refs.values()
|
||||
) / (1024 * 1024)
|
||||
log_fn(f"Stored {len(terrain_shm_refs)} terrain tiles in shared memory ({tile_mb:.0f} MB)")
|
||||
use_shm = True
|
||||
else:
|
||||
use_shm = False
|
||||
except Exception as e:
|
||||
log_fn(f"Shared memory setup failed ({e}), using pickle fallback")
|
||||
use_shm = False
|
||||
|
||||
# Store buildings + OSM data in shared memory (pickle once, read many)
|
||||
shared_data_refs = {}
|
||||
if use_shm:
|
||||
bld_block, bld_ref = _store_pickle_in_shm(buildings, "Buildings", log_fn)
|
||||
if bld_block:
|
||||
shm_blocks.append(bld_block)
|
||||
shared_data_refs['buildings'] = bld_ref
|
||||
|
||||
osm_data_dict = {
|
||||
'streets': streets,
|
||||
'water_bodies': water_bodies,
|
||||
'vegetation_areas': vegetation_areas,
|
||||
}
|
||||
osm_block, osm_ref = _store_pickle_in_shm(osm_data_dict, "OSM data", log_fn)
|
||||
if osm_block:
|
||||
shm_blocks.append(osm_block)
|
||||
shared_data_refs['osm_data'] = osm_ref
|
||||
|
||||
items = [
|
||||
(lat, lon, point_elevations.get((lat, lon), 0.0))
|
||||
for lat, lon in grid
|
||||
]
|
||||
|
||||
# Larger chunks than Ray — fewer workers means bigger chunks
|
||||
chunk_size = max(1, len(items) // (num_workers * 2))
|
||||
# Target larger chunks to amortize IPC overhead (was num_workers*2)
|
||||
chunk_size = max(1, min(400, len(items) // max(2, num_workers)))
|
||||
chunks = [items[i:i + chunk_size] for i in range(0, len(items), chunk_size)]
|
||||
log_fn(f"Submitting {len(chunks)} chunks of ~{chunk_size} points")
|
||||
|
||||
cache_key = f"{site_dict['lat']:.4f},{site_dict['lon']:.4f},{len(buildings)}"
|
||||
config = {
|
||||
'site_dict': site_dict,
|
||||
'settings_dict': settings_dict,
|
||||
'site_elevation': site_elevation,
|
||||
'cache_key': cache_key,
|
||||
}
|
||||
if precomputed:
|
||||
config['precomputed'] = precomputed
|
||||
@@ -505,20 +965,54 @@ def _calculate_with_process_pool(
|
||||
pool = None
|
||||
|
||||
try:
|
||||
# Use spawn context for clean worker processes
|
||||
ctx = mp.get_context('spawn')
|
||||
pool = ProcessPoolExecutor(max_workers=num_workers, mp_context=ctx)
|
||||
futures = {
|
||||
pool.submit(
|
||||
_pool_worker_process_chunk,
|
||||
(chunk, terrain_cache, buildings, osm_data, config),
|
||||
): i
|
||||
for i, chunk in enumerate(chunks)
|
||||
}
|
||||
_set_active_pool(pool)
|
||||
|
||||
if use_shm and shared_data_refs:
|
||||
# Full shared memory path: terrain + buildings + OSM all via shm
|
||||
worker_fn = _pool_worker_shm_shared
|
||||
futures = {
|
||||
pool.submit(
|
||||
worker_fn,
|
||||
(chunk, terrain_shm_refs, shared_data_refs, config),
|
||||
): i
|
||||
for i, chunk in enumerate(chunks)
|
||||
}
|
||||
elif use_shm and data_items <= 2000:
|
||||
# Terrain-only shm — buildings/OSM pickled per chunk.
|
||||
# Only safe for small datasets; large datasets would OOM from
|
||||
# pickle copies (num_chunks × pickle_size).
|
||||
log_fn(f"Terrain-only shm (small data: {data_items} items)")
|
||||
worker_fn = _pool_worker_shm_chunk
|
||||
futures = {
|
||||
pool.submit(
|
||||
worker_fn,
|
||||
(chunk, terrain_shm_refs, buildings, osm_data, config),
|
||||
): i
|
||||
for i, chunk in enumerate(chunks)
|
||||
}
|
||||
elif data_items <= 2000:
|
||||
# Full pickle fallback — only safe for small datasets
|
||||
log_fn(f"Full pickle path (small data: {data_items} items)")
|
||||
futures = {
|
||||
pool.submit(
|
||||
_pool_worker_process_chunk,
|
||||
(chunk, terrain_cache, buildings, osm_data, config),
|
||||
): i
|
||||
for i, chunk in enumerate(chunks)
|
||||
}
|
||||
else:
|
||||
# Large dataset + shared memory failed → per-chunk pickle would OOM.
|
||||
# Bail out; caller will fall back to sequential.
|
||||
log_fn(f"Shared memory failed for large dataset ({data_items} items) "
|
||||
f"— skipping ProcessPool to avoid OOM")
|
||||
raise MemoryError(
|
||||
f"Cannot safely pickle {data_items} OSM items per chunk"
|
||||
)
|
||||
|
||||
completed_chunks = 0
|
||||
for future in as_completed(futures):
|
||||
# Check cancellation between chunks
|
||||
if cancel_token and cancel_token.is_cancelled:
|
||||
log_fn(f"Cancelled — cancelling {len(futures) - completed_chunks - 1} pending futures")
|
||||
for f in futures:
|
||||
@@ -539,20 +1033,46 @@ def _calculate_with_process_pool(
|
||||
eta = (total_points - pts) / rate if rate > 0 else 0
|
||||
log_fn(f"Progress: {completed_chunks}/{len(chunks)} chunks ({pct}%) — "
|
||||
f"{pts} pts, {rate:.0f} pts/s, ETA {eta:.0f}s")
|
||||
if progress_fn:
|
||||
progress_fn("Calculating coverage", 0.40 + 0.55 * (completed_chunks / len(chunks)))
|
||||
|
||||
except MemoryError:
|
||||
raise # Propagate to caller for sequential fallback
|
||||
|
||||
except Exception as e:
|
||||
log_fn(f"ProcessPool error: {e}")
|
||||
|
||||
finally:
|
||||
# CRITICAL: Always cleanup pool and orphaned workers
|
||||
_clear_active_pool()
|
||||
if pool:
|
||||
pool.shutdown(wait=False, cancel_futures=True)
|
||||
# Give pool time to cleanup gracefully
|
||||
time.sleep(0.5)
|
||||
# Then force kill any survivors by process name
|
||||
killed = _kill_worker_processes()
|
||||
if killed > 0:
|
||||
log_fn(f"Force killed {killed} orphaned workers")
|
||||
# Cleanup shared memory blocks
|
||||
for block in shm_blocks:
|
||||
try:
|
||||
block.close()
|
||||
block.unlink()
|
||||
except Exception:
|
||||
pass
|
||||
# Release large local references before GC
|
||||
chunks = None # noqa: F841
|
||||
items = None # noqa: F841
|
||||
osm_data = None # noqa: F841
|
||||
shared_data_refs = None # noqa: F841
|
||||
# Force garbage collection to release memory from workers
|
||||
gc.collect()
|
||||
# Log memory after cleanup
|
||||
try:
|
||||
with open('/proc/self/status') as f:
|
||||
for line in f:
|
||||
if line.startswith('VmRSS:'):
|
||||
log_fn(f"Memory after cleanup: {line.strip()}")
|
||||
break
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
calc_time = time.time() - t_calc
|
||||
log_fn(f"ProcessPool done: {calc_time:.1f}s, {len(all_results)} results "
|
||||
@@ -561,7 +1081,11 @@ def _calculate_with_process_pool(
|
||||
timing = {
|
||||
"parallel_total": calc_time,
|
||||
"workers": num_workers,
|
||||
"backend": "process_pool",
|
||||
"backend": "process_pool" + (
|
||||
"/shm_full" if (use_shm and shared_data_refs)
|
||||
else "/shm_terrain" if use_shm
|
||||
else "/pickle"
|
||||
),
|
||||
}
|
||||
return all_results, timing
|
||||
|
||||
@@ -573,6 +1097,7 @@ def _calculate_sequential(
|
||||
grid, point_elevations, site_dict, settings_dict,
|
||||
buildings, streets, water_bodies, vegetation_areas,
|
||||
site_elevation, log_fn, cancel_token=None, precomputed=None,
|
||||
progress_fn=None,
|
||||
):
|
||||
"""Sequential fallback — no extra dependencies, runs in calling thread."""
|
||||
from app.services.coverage_service import CoverageService, SiteParams, CoverageSettings
|
||||
@@ -582,8 +1107,9 @@ def _calculate_sequential(
|
||||
settings = CoverageSettings(**settings_dict)
|
||||
svc = CoverageService()
|
||||
|
||||
spatial_idx = SpatialIndex()
|
||||
spatial_idx = None
|
||||
if buildings:
|
||||
spatial_idx = SpatialIndex()
|
||||
spatial_idx.build(buildings)
|
||||
|
||||
total = len(grid)
|
||||
@@ -592,6 +1118,7 @@ def _calculate_sequential(
|
||||
"los": 0.0, "buildings": 0.0, "antenna": 0.0,
|
||||
"dominant_path": 0.0, "street_canyon": 0.0,
|
||||
"reflection": 0.0, "vegetation": 0.0,
|
||||
"lod_none": 0, "lod_simplified": 0, "lod_full": 0,
|
||||
}
|
||||
|
||||
t0 = time.time()
|
||||
@@ -604,6 +1131,8 @@ def _calculate_sequential(
|
||||
|
||||
if i % log_interval == 0:
|
||||
log_fn(f"Sequential: {i}/{total} ({i * 100 // total}%)")
|
||||
if progress_fn:
|
||||
progress_fn("Calculating coverage", 0.40 + 0.55 * (i / total))
|
||||
|
||||
point_elev = point_elevations.get((lat, lon), 0.0)
|
||||
|
||||
@@ -617,6 +1146,9 @@ def _calculate_sequential(
|
||||
site_elevation, point_elev, timing,
|
||||
precomputed_distance=pre.get('distance') if pre else None,
|
||||
precomputed_path_loss=pre.get('path_loss') if pre else None,
|
||||
precomputed_has_los=pre.get('has_los') if pre else None,
|
||||
precomputed_terrain_loss=pre.get('terrain_loss') if pre else None,
|
||||
precomputed_antenna_loss=pre.get('antenna_loss') if pre else None,
|
||||
)
|
||||
if point.rsrp >= settings.min_signal:
|
||||
results.append(point.model_dump())
|
||||
@@ -625,6 +1157,9 @@ def _calculate_sequential(
|
||||
log_fn(f"Sequential done: {calc_time:.1f}s, {len(results)} results "
|
||||
f"({calc_time / max(1, total) * 1000:.1f}ms/point)")
|
||||
|
||||
# Force garbage collection after sequential computation
|
||||
gc.collect()
|
||||
|
||||
timing["sequential_total"] = calc_time
|
||||
timing["backend"] = "sequential"
|
||||
return results, timing
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
import asyncio
|
||||
import numpy as np
|
||||
from typing import List, Tuple, Optional
|
||||
from dataclasses import dataclass
|
||||
@@ -24,7 +25,10 @@ class StreetCanyonService:
|
||||
Loss increases at corners/turns.
|
||||
"""
|
||||
|
||||
OVERPASS_URL = "https://overpass-api.de/api/interpreter"
|
||||
OVERPASS_URLS = [
|
||||
"https://overpass-api.de/api/interpreter",
|
||||
"https://overpass.kumi.systems/api/interpreter",
|
||||
]
|
||||
|
||||
# Default street widths by type
|
||||
STREET_WIDTHS = {
|
||||
@@ -88,14 +92,24 @@ class StreetCanyonService:
|
||||
out skel qt;
|
||||
"""
|
||||
|
||||
try:
|
||||
async with httpx.AsyncClient(timeout=60.0) as client:
|
||||
response = await client.post(self.OVERPASS_URL, data={"data": query})
|
||||
response.raise_for_status()
|
||||
data = response.json()
|
||||
except Exception as e:
|
||||
print(f"[Streets] Fetch error: {e}")
|
||||
return []
|
||||
data = None
|
||||
max_retries = 3
|
||||
for attempt in range(max_retries):
|
||||
url = self.OVERPASS_URLS[attempt % len(self.OVERPASS_URLS)]
|
||||
try:
|
||||
timeout = 60.0 * (attempt + 1)
|
||||
async with httpx.AsyncClient(timeout=timeout) as client:
|
||||
response = await client.post(url, data={"data": query})
|
||||
response.raise_for_status()
|
||||
data = response.json()
|
||||
break
|
||||
except Exception as e:
|
||||
print(f"[Streets] Overpass attempt {attempt + 1}/{max_retries} failed ({url}): {e}")
|
||||
if attempt < max_retries - 1:
|
||||
await asyncio.sleep(2 ** attempt)
|
||||
else:
|
||||
print(f"[Streets] All {max_retries} attempts failed")
|
||||
return []
|
||||
|
||||
streets = self._parse_streets(data)
|
||||
|
||||
|
||||
@@ -20,8 +20,24 @@ class TerrainService:
|
||||
"""
|
||||
|
||||
SRTM_SOURCES = [
|
||||
"https://elevation-tiles-prod.s3.amazonaws.com/skadi/{lat_dir}/{tile_name}.hgt.gz",
|
||||
"https://s3.amazonaws.com/elevation-tiles-prod/skadi/{lat_dir}/{tile_name}.hgt.gz",
|
||||
# Our tile server — SRTM1 (30m) preferred, uncompressed
|
||||
{
|
||||
"url": "https://terra.eliah.one/srtm1/{tile_name}.hgt",
|
||||
"compressed": False,
|
||||
"resolution": "srtm1",
|
||||
},
|
||||
# Our tile server — SRTM3 (90m) fallback
|
||||
{
|
||||
"url": "https://terra.eliah.one/srtm3/{tile_name}.hgt",
|
||||
"compressed": False,
|
||||
"resolution": "srtm3",
|
||||
},
|
||||
# Public AWS mirror — SRTM1, gzip compressed
|
||||
{
|
||||
"url": "https://elevation-tiles-prod.s3.amazonaws.com/skadi/{lat_dir}/{tile_name}.hgt.gz",
|
||||
"compressed": True,
|
||||
"resolution": "srtm1",
|
||||
},
|
||||
]
|
||||
|
||||
def __init__(self):
|
||||
@@ -48,7 +64,7 @@ class TerrainService:
|
||||
return self.terrain_path / f"{tile_name}.hgt"
|
||||
|
||||
async def download_tile(self, tile_name: str) -> bool:
|
||||
"""Download SRTM tile if not cached locally"""
|
||||
"""Download SRTM tile from configured sources, preferring highest resolution."""
|
||||
tile_path = self.get_tile_path(tile_name)
|
||||
|
||||
if tile_path.exists():
|
||||
@@ -56,37 +72,54 @@ class TerrainService:
|
||||
|
||||
lat_dir = tile_name[:3] # e.g., "N48"
|
||||
|
||||
async with httpx.AsyncClient(timeout=60.0) as client:
|
||||
for source_url in self.SRTM_SOURCES:
|
||||
url = source_url.format(lat_dir=lat_dir, tile_name=tile_name)
|
||||
async with httpx.AsyncClient(timeout=60.0, follow_redirects=True) as client:
|
||||
for source in self.SRTM_SOURCES:
|
||||
url = source["url"].format(lat_dir=lat_dir, tile_name=tile_name)
|
||||
try:
|
||||
response = await client.get(url)
|
||||
|
||||
if response.status_code == 200:
|
||||
data = response.content
|
||||
|
||||
if url.endswith('.gz'):
|
||||
data = gzip.decompress(data)
|
||||
elif url.endswith('.zip'):
|
||||
with zipfile.ZipFile(io.BytesIO(data)) as zf:
|
||||
for name in zf.namelist():
|
||||
if name.endswith('.hgt'):
|
||||
data = zf.read(name)
|
||||
break
|
||||
# Skip empty responses
|
||||
if len(data) < 1000:
|
||||
continue
|
||||
|
||||
if source["compressed"]:
|
||||
if url.endswith('.gz'):
|
||||
data = gzip.decompress(data)
|
||||
elif url.endswith('.zip'):
|
||||
with zipfile.ZipFile(io.BytesIO(data)) as zf:
|
||||
for name in zf.namelist():
|
||||
if name.endswith('.hgt'):
|
||||
data = zf.read(name)
|
||||
break
|
||||
|
||||
# Validate tile size (SRTM1: 25,934,402 bytes, SRTM3: 2,884,802 bytes)
|
||||
if len(data) not in (3601 * 3601 * 2, 1201 * 1201 * 2):
|
||||
print(f"[Terrain] Invalid tile size {len(data)} from {url}")
|
||||
continue
|
||||
|
||||
tile_path.write_bytes(data)
|
||||
print(f"[Terrain] Downloaded {tile_name} ({len(data)} bytes)")
|
||||
res = source["resolution"]
|
||||
size_mb = len(data) / 1048576
|
||||
print(f"[Terrain] Downloaded {tile_name} ({res}, {size_mb:.1f} MB)")
|
||||
return True
|
||||
|
||||
except Exception as e:
|
||||
print(f"[Terrain] Failed from {url}: {e}")
|
||||
continue
|
||||
|
||||
print(f"[Terrain] Could not download {tile_name}")
|
||||
print(f"[Terrain] Could not download {tile_name} from any source")
|
||||
return False
|
||||
|
||||
def _load_tile(self, tile_name: str) -> Optional[np.ndarray]:
|
||||
"""Load tile from disk into memory cache"""
|
||||
"""Load tile from disk into memory cache using memory-mapped I/O.
|
||||
|
||||
Uses np.memmap so the OS pages data from disk on demand — near-zero
|
||||
upfront RAM cost per tile (~25 MB savings each vs full load).
|
||||
Falls back to np.frombuffer if memmap fails.
|
||||
"""
|
||||
# Check memory cache first
|
||||
if tile_name in self._tile_cache:
|
||||
return self._tile_cache[tile_name]
|
||||
@@ -97,18 +130,26 @@ class TerrainService:
|
||||
return None
|
||||
|
||||
try:
|
||||
data = tile_path.read_bytes()
|
||||
file_size = tile_path.stat().st_size
|
||||
|
||||
# SRTM HGT format: big-endian signed 16-bit integers
|
||||
if len(data) == 3601 * 3601 * 2:
|
||||
if file_size == 3601 * 3601 * 2:
|
||||
size = 3601 # SRTM1 (30m)
|
||||
elif len(data) == 1201 * 1201 * 2:
|
||||
elif file_size == 1201 * 1201 * 2:
|
||||
size = 1201 # SRTM3 (90m)
|
||||
else:
|
||||
print(f"[Terrain] Unknown tile size: {len(data)} bytes for {tile_name}")
|
||||
print(f"[Terrain] Unknown tile size: {file_size} bytes for {tile_name}")
|
||||
return None
|
||||
|
||||
tile = np.frombuffer(data, dtype='>i2').reshape((size, size))
|
||||
# Memory-mapped loading — OS pages from disk, near-zero RAM
|
||||
try:
|
||||
tile = np.memmap(
|
||||
tile_path, dtype='>i2', mode='r', shape=(size, size),
|
||||
)
|
||||
except Exception:
|
||||
# Fallback: full load into RAM
|
||||
data = tile_path.read_bytes()
|
||||
tile = np.frombuffer(data, dtype='>i2').reshape((size, size))
|
||||
|
||||
# Manage memory cache with LRU eviction
|
||||
if len(self._tile_cache) >= self._max_cache_tiles:
|
||||
@@ -136,56 +177,179 @@ class TerrainService:
|
||||
|
||||
return self._load_tile(tile_name)
|
||||
|
||||
def _bilinear_sample(self, tile: np.ndarray, lat: float, lon: float) -> float:
|
||||
"""Sample elevation with bilinear interpolation for sub-meter accuracy.
|
||||
|
||||
SRTM1 at 30m means nearest-neighbor can have 15m positional error.
|
||||
Bilinear interpolation reduces this to sub-meter accuracy.
|
||||
"""
|
||||
size = tile.shape[0]
|
||||
|
||||
# Tile southwest corner
|
||||
lat_int = int(lat) if lat >= 0 else int(lat) - 1
|
||||
lon_int = int(lon) if lon >= 0 else int(lon) - 1
|
||||
|
||||
# Fractional position within tile (0.0 to 1.0)
|
||||
lat_frac = lat - lat_int # 0 = south edge, 1 = north edge
|
||||
lon_frac = lon - lon_int # 0 = west edge, 1 = east edge
|
||||
|
||||
# Convert to row/col (note: rows go north to south!)
|
||||
row_exact = (1.0 - lat_frac) * (size - 1) # 0 = north, size-1 = south
|
||||
col_exact = lon_frac * (size - 1) # 0 = west, size-1 = east
|
||||
|
||||
# Four surrounding grid points
|
||||
r0 = int(row_exact)
|
||||
c0 = int(col_exact)
|
||||
r1 = min(r0 + 1, size - 1)
|
||||
c1 = min(c0 + 1, size - 1)
|
||||
|
||||
# Fractional position between grid points
|
||||
dr = row_exact - r0
|
||||
dc = col_exact - c0
|
||||
|
||||
# Get four corner values
|
||||
z00 = tile[r0, c0]
|
||||
z01 = tile[r0, c1]
|
||||
z10 = tile[r1, c0]
|
||||
z11 = tile[r1, c1]
|
||||
|
||||
# Handle void (-32768) values - fall back to nearest valid
|
||||
void_val = -32768
|
||||
corners = [(z00, r0, c0), (z01, r0, c1), (z10, r1, c0), (z11, r1, c1)]
|
||||
if z00 == void_val or z01 == void_val or z10 == void_val or z11 == void_val:
|
||||
valid = [(z, r, c) for z, r, c in corners if z != void_val]
|
||||
if not valid:
|
||||
return 0.0
|
||||
# Return nearest valid value
|
||||
return float(valid[0][0])
|
||||
|
||||
# Bilinear interpolation
|
||||
elevation = (z00 * (1 - dr) * (1 - dc) +
|
||||
z01 * (1 - dr) * dc +
|
||||
z10 * dr * (1 - dc) +
|
||||
z11 * dr * dc)
|
||||
|
||||
return float(elevation)
|
||||
|
||||
async def get_elevation(self, lat: float, lon: float) -> float:
|
||||
"""Get elevation at specific coordinate (meters above sea level)"""
|
||||
"""Get elevation at specific coordinate with bilinear interpolation."""
|
||||
tile_name = self.get_tile_name(lat, lon)
|
||||
tile = await self.load_tile(tile_name)
|
||||
|
||||
if tile is None:
|
||||
return 0.0
|
||||
|
||||
size = tile.shape[0]
|
||||
|
||||
# Calculate position within tile
|
||||
lat_int = int(lat) if lat >= 0 else int(lat) - 1
|
||||
lon_int = int(lon) if lon >= 0 else int(lon) - 1
|
||||
|
||||
lat_frac = lat - lat_int
|
||||
lon_frac = lon - lon_int
|
||||
|
||||
# Row 0 = north edge, last row = south edge
|
||||
row = int((1 - lat_frac) * (size - 1))
|
||||
col = int(lon_frac * (size - 1))
|
||||
|
||||
row = max(0, min(row, size - 1))
|
||||
col = max(0, min(col, size - 1))
|
||||
|
||||
elevation = tile[row, col]
|
||||
|
||||
# -32768 = void/no data
|
||||
if elevation == -32768:
|
||||
return 0.0
|
||||
|
||||
return float(elevation)
|
||||
return self._bilinear_sample(tile, lat, lon)
|
||||
|
||||
def get_elevation_sync(self, lat: float, lon: float) -> float:
|
||||
"""Sync elevation lookup from memory cache. Returns 0.0 if tile not loaded."""
|
||||
"""Sync elevation lookup with bilinear interpolation. Returns 0.0 if tile not loaded."""
|
||||
tile_name = self.get_tile_name(lat, lon)
|
||||
tile = self._tile_cache.get(tile_name)
|
||||
if tile is None:
|
||||
return 0.0
|
||||
|
||||
size = tile.shape[0]
|
||||
lat_int = int(lat) if lat >= 0 else int(lat) - 1
|
||||
lon_int = int(lon) if lon >= 0 else int(lon) - 1
|
||||
return self._bilinear_sample(tile, lat, lon)
|
||||
|
||||
row = int((1 - (lat - lat_int)) * (size - 1))
|
||||
col = int((lon - lon_int) * (size - 1))
|
||||
row = max(0, min(row, size - 1))
|
||||
col = max(0, min(col, size - 1))
|
||||
def get_elevations_batch(self, lats: np.ndarray, lons: np.ndarray) -> np.ndarray:
|
||||
"""Vectorized elevation lookup with bilinear interpolation.
|
||||
|
||||
elevation = tile[row, col]
|
||||
return 0.0 if elevation == -32768 else float(elevation)
|
||||
Handles points spanning multiple tiles efficiently.
|
||||
Groups points by tile, processes each tile with full NumPy vectorization.
|
||||
Tiles must be pre-loaded into memory cache.
|
||||
|
||||
Args:
|
||||
lats: Array of latitudes
|
||||
lons: Array of longitudes
|
||||
|
||||
Returns:
|
||||
Array of elevations (0.0 for missing tiles or void data)
|
||||
"""
|
||||
elevations = np.zeros(len(lats), dtype=np.float32)
|
||||
|
||||
# Compute tile indices for each point
|
||||
lat_ints = np.floor(lats).astype(int)
|
||||
lon_ints = np.floor(lons).astype(int)
|
||||
|
||||
# Group by tile using unique key
|
||||
unique_tiles = set(zip(lat_ints, lon_ints))
|
||||
|
||||
for lat_int, lon_int in unique_tiles:
|
||||
# Get tile name
|
||||
lat_letter = 'N' if lat_int >= 0 else 'S'
|
||||
lon_letter = 'E' if lon_int >= 0 else 'W'
|
||||
tile_name = f"{lat_letter}{abs(lat_int):02d}{lon_letter}{abs(lon_int):03d}"
|
||||
|
||||
tile = self._tile_cache.get(tile_name)
|
||||
if tile is None:
|
||||
continue
|
||||
|
||||
# Mask for points in this tile
|
||||
mask = (lat_ints == lat_int) & (lon_ints == lon_int)
|
||||
tile_lats = lats[mask]
|
||||
tile_lons = lons[mask]
|
||||
|
||||
size = tile.shape[0]
|
||||
|
||||
# Vectorized bilinear interpolation for all points in this tile
|
||||
lat_frac = tile_lats - lat_int
|
||||
lon_frac = tile_lons - lon_int
|
||||
|
||||
row_exact = (1.0 - lat_frac) * (size - 1)
|
||||
col_exact = lon_frac * (size - 1)
|
||||
|
||||
r0 = np.clip(row_exact.astype(int), 0, size - 2)
|
||||
c0 = np.clip(col_exact.astype(int), 0, size - 2)
|
||||
r1 = r0 + 1
|
||||
c1 = c0 + 1
|
||||
|
||||
dr = row_exact - r0
|
||||
dc = col_exact - c0
|
||||
|
||||
# Get four corner values for all points at once
|
||||
z00 = tile[r0, c0].astype(np.float32)
|
||||
z01 = tile[r0, c1].astype(np.float32)
|
||||
z10 = tile[r1, c0].astype(np.float32)
|
||||
z11 = tile[r1, c1].astype(np.float32)
|
||||
|
||||
# Bilinear interpolation (vectorized)
|
||||
result = (z00 * (1 - dr) * (1 - dc) +
|
||||
z01 * (1 - dr) * dc +
|
||||
z10 * dr * (1 - dc) +
|
||||
z11 * dr * dc)
|
||||
|
||||
# Handle void values (-32768) - set to 0
|
||||
void_mask = (z00 == -32768) | (z01 == -32768) | (z10 == -32768) | (z11 == -32768)
|
||||
result[void_mask] = 0.0
|
||||
|
||||
elevations[mask] = result
|
||||
|
||||
return elevations
|
||||
|
||||
def get_required_tiles(self, center_lat: float, center_lon: float, radius_km: float) -> list:
|
||||
"""Determine which tiles are needed for a coverage calculation."""
|
||||
# Convert radius to degrees (approximate)
|
||||
lat_delta = radius_km / 111.0 # ~111 km per degree latitude
|
||||
lon_delta = radius_km / (111.0 * np.cos(np.radians(center_lat)))
|
||||
|
||||
min_lat = center_lat - lat_delta
|
||||
max_lat = center_lat + lat_delta
|
||||
min_lon = center_lon - lon_delta
|
||||
max_lon = center_lon + lon_delta
|
||||
|
||||
tiles = []
|
||||
for lat in range(int(np.floor(min_lat)), int(np.floor(max_lat)) + 1):
|
||||
for lon in range(int(np.floor(min_lon)), int(np.floor(max_lon)) + 1):
|
||||
lat_letter = 'N' if lat >= 0 else 'S'
|
||||
lon_letter = 'E' if lon >= 0 else 'W'
|
||||
tile_name = f"{lat_letter}{abs(lat):02d}{lon_letter}{abs(lon):03d}"
|
||||
tiles.append(tile_name)
|
||||
|
||||
return tiles
|
||||
|
||||
def get_missing_tiles(self, center_lat: float, center_lon: float, radius_km: float) -> list:
|
||||
"""Check which needed tiles are not available locally."""
|
||||
required = self.get_required_tiles(center_lat, center_lon, radius_km)
|
||||
return [t for t in required if not self.get_tile_path(t).exists()]
|
||||
|
||||
async def get_elevation_profile(
|
||||
self,
|
||||
@@ -272,6 +436,38 @@ class TerrainService:
|
||||
total = sum(f.stat().st_size for f in self.terrain_path.glob("*.hgt"))
|
||||
return total / (1024 * 1024)
|
||||
|
||||
def evict_disk_cache(self, max_size_mb: float = 2048.0):
|
||||
"""LRU eviction of .hgt files when disk cache exceeds max_size_mb.
|
||||
|
||||
Deletes the oldest-accessed files until total size is under the limit.
|
||||
"""
|
||||
hgt_files = list(self.terrain_path.glob("*.hgt"))
|
||||
if not hgt_files:
|
||||
return
|
||||
|
||||
total = sum(f.stat().st_size for f in hgt_files)
|
||||
if total / (1024 * 1024) <= max_size_mb:
|
||||
return
|
||||
|
||||
# Sort by access time (oldest first)
|
||||
hgt_files.sort(key=lambda f: f.stat().st_atime)
|
||||
|
||||
evicted = 0
|
||||
for f in hgt_files:
|
||||
if total / (1024 * 1024) <= max_size_mb:
|
||||
break
|
||||
fsize = f.stat().st_size
|
||||
# Remove from memory cache if loaded
|
||||
stem = f.stem
|
||||
self._tile_cache.pop(stem, None)
|
||||
f.unlink()
|
||||
total -= fsize
|
||||
evicted += 1
|
||||
|
||||
if evicted:
|
||||
print(f"[Terrain] Evicted {evicted} tiles, "
|
||||
f"cache now {total / (1024 * 1024):.0f} MB")
|
||||
|
||||
@staticmethod
|
||||
def haversine_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
|
||||
"""Calculate distance between two points in meters"""
|
||||
|
||||
142
backend/app/services/tile_processor.py
Normal file
142
backend/app/services/tile_processor.py
Normal file
@@ -0,0 +1,142 @@
|
||||
"""
|
||||
Tile-based processing for large radius coverage calculations.
|
||||
|
||||
When radius > 10km, the coverage circle is split into 5km sub-tiles.
|
||||
Each tile is processed independently — OSM data and terrain are loaded
|
||||
per-tile and freed between tiles, keeping peak RAM usage bounded.
|
||||
|
||||
Usage:
|
||||
from app.services.tile_processor import (
|
||||
generate_tile_grid, partition_grid_to_tiles,
|
||||
TILE_THRESHOLD_M, get_adaptive_worker_count,
|
||||
)
|
||||
|
||||
if radius_m > TILE_THRESHOLD_M:
|
||||
tiles = generate_tile_grid(center_lat, center_lon, radius_m)
|
||||
tile_grids = partition_grid_to_tiles(grid, tiles)
|
||||
"""
|
||||
|
||||
import math
|
||||
from dataclasses import dataclass
|
||||
from typing import List, Tuple, Dict
|
||||
|
||||
|
||||
# Use tiled processing for radius above this threshold
|
||||
TILE_THRESHOLD_M = 10000 # 10 km
|
||||
|
||||
# Default tile size — 5km balances overhead vs memory usage
|
||||
DEFAULT_TILE_SIZE_M = 5000 # 5 km
|
||||
|
||||
|
||||
@dataclass
|
||||
class Tile:
|
||||
"""A rectangular sub-tile of the coverage area."""
|
||||
bbox: Tuple[float, float, float, float] # (min_lat, min_lon, max_lat, max_lon)
|
||||
index: Tuple[int, int] # (row, col) in tile grid
|
||||
|
||||
|
||||
def generate_tile_grid(
|
||||
center_lat: float,
|
||||
center_lon: float,
|
||||
radius_m: float,
|
||||
tile_size_m: float = DEFAULT_TILE_SIZE_M,
|
||||
) -> List[Tile]:
|
||||
"""Generate grid of tiles covering the coverage circle.
|
||||
|
||||
Only includes tiles that actually intersect the coverage circle.
|
||||
Tiles are ordered row-by-row from SW to NE.
|
||||
"""
|
||||
cos_lat = math.cos(math.radians(center_lat))
|
||||
|
||||
# Full coverage area in degrees
|
||||
lat_delta = radius_m / 111000
|
||||
lon_delta = radius_m / (111000 * cos_lat)
|
||||
|
||||
# Number of tiles along each axis
|
||||
n_tiles = max(1, math.ceil(radius_m * 2 / tile_size_m))
|
||||
|
||||
# Tile size in degrees
|
||||
tile_lat = (2 * lat_delta) / n_tiles
|
||||
tile_lon = (2 * lon_delta) / n_tiles
|
||||
|
||||
base_lat = center_lat - lat_delta
|
||||
base_lon = center_lon - lon_delta
|
||||
|
||||
tiles = []
|
||||
for row in range(n_tiles):
|
||||
for col in range(n_tiles):
|
||||
min_lat = base_lat + row * tile_lat
|
||||
max_lat = base_lat + (row + 1) * tile_lat
|
||||
min_lon = base_lon + col * tile_lon
|
||||
max_lon = base_lon + (col + 1) * tile_lon
|
||||
|
||||
bbox = (min_lat, min_lon, max_lat, max_lon)
|
||||
|
||||
if _tile_intersects_circle(bbox, center_lat, center_lon, radius_m, cos_lat):
|
||||
tiles.append(Tile(bbox=bbox, index=(row, col)))
|
||||
|
||||
return tiles
|
||||
|
||||
|
||||
def _tile_intersects_circle(
|
||||
bbox: Tuple[float, float, float, float],
|
||||
center_lat: float,
|
||||
center_lon: float,
|
||||
radius_m: float,
|
||||
cos_lat: float,
|
||||
) -> bool:
|
||||
"""Check if tile bbox intersects the coverage circle.
|
||||
|
||||
Uses fast equirectangular approximation — tiles are small (5km)
|
||||
so full haversine is unnecessary for intersection testing.
|
||||
"""
|
||||
min_lat, min_lon, max_lat, max_lon = bbox
|
||||
|
||||
# Closest point on bbox to circle center
|
||||
closest_lat = max(min_lat, min(center_lat, max_lat))
|
||||
closest_lon = max(min_lon, min(center_lon, max_lon))
|
||||
|
||||
# Approximate distance in meters (equirectangular)
|
||||
dlat = (closest_lat - center_lat) * 111000
|
||||
dlon = (closest_lon - center_lon) * 111000 * cos_lat
|
||||
dist_sq = dlat * dlat + dlon * dlon
|
||||
|
||||
return dist_sq <= radius_m * radius_m
|
||||
|
||||
|
||||
def get_adaptive_worker_count(radius_m: float, base_workers: int) -> int:
|
||||
"""Scale down workers for large calculations to prevent combined memory explosion.
|
||||
|
||||
Large radius = more buildings per tile = more memory per worker.
|
||||
Reducing workers keeps total worker memory bounded.
|
||||
"""
|
||||
if radius_m > 30000:
|
||||
return min(base_workers, 2)
|
||||
elif radius_m > 20000:
|
||||
return min(base_workers, 3)
|
||||
elif radius_m > 10000:
|
||||
return min(base_workers, 4)
|
||||
return base_workers
|
||||
|
||||
|
||||
def partition_grid_to_tiles(
|
||||
grid: List[Tuple[float, float]],
|
||||
tiles: List[Tile],
|
||||
) -> Dict[Tuple[int, int], List[Tuple[float, float]]]:
|
||||
"""Partition grid points into tiles by bbox containment.
|
||||
|
||||
Returns dict mapping tile index -> list of (lat, lon) points.
|
||||
Points on tile boundaries are assigned to the first matching tile.
|
||||
"""
|
||||
tile_grids: Dict[Tuple[int, int], List[Tuple[float, float]]] = {
|
||||
t.index: [] for t in tiles
|
||||
}
|
||||
|
||||
for lat, lon in grid:
|
||||
for tile in tiles:
|
||||
min_lat, min_lon, max_lat, max_lon = tile.bbox
|
||||
if min_lat <= lat <= max_lat and min_lon <= lon <= max_lon:
|
||||
tile_grids[tile.index].append((lat, lon))
|
||||
break
|
||||
|
||||
return tile_grids
|
||||
@@ -6,6 +6,7 @@ Uses ITU-R P.833 approximations for foliage loss.
|
||||
"""
|
||||
|
||||
import os
|
||||
import asyncio
|
||||
import httpx
|
||||
import json
|
||||
from typing import List, Tuple, Optional
|
||||
@@ -20,6 +21,11 @@ class VegetationArea(BaseModel):
|
||||
geometry: List[Tuple[float, float]] # [(lon, lat), ...]
|
||||
vegetation_type: str # forest, wood, scrub, orchard
|
||||
density: str # dense, sparse, mixed
|
||||
# Bounding box for fast rejection (computed from geometry)
|
||||
min_lat: float = 0.0
|
||||
max_lat: float = 0.0
|
||||
min_lon: float = 0.0
|
||||
max_lon: float = 0.0
|
||||
|
||||
|
||||
class VegetationCache:
|
||||
@@ -81,7 +87,10 @@ class VegetationCache:
|
||||
class VegetationService:
|
||||
"""OSM vegetation for signal attenuation"""
|
||||
|
||||
OVERPASS_URL = "https://overpass-api.de/api/interpreter"
|
||||
OVERPASS_URLS = [
|
||||
"https://overpass-api.de/api/interpreter",
|
||||
"https://overpass.kumi.systems/api/interpreter",
|
||||
]
|
||||
|
||||
# Attenuation dB per 100 meters of vegetation
|
||||
ATTENUATION_DB_PER_100M = {
|
||||
@@ -123,11 +132,28 @@ class VegetationService:
|
||||
cached = self.cache.get(min_lat, min_lon, max_lat, max_lon)
|
||||
if cached is not None:
|
||||
print(f"[Vegetation] Cache hit for bbox")
|
||||
areas = [VegetationArea(**v) for v in cached]
|
||||
areas = []
|
||||
for v in cached:
|
||||
area = VegetationArea(**v)
|
||||
# Recompute bbox if missing (backward compat with old cache)
|
||||
if area.min_lat == 0.0 and area.max_lat == 0.0 and area.geometry:
|
||||
lons = [p[0] for p in area.geometry]
|
||||
lats = [p[1] for p in area.geometry]
|
||||
area = VegetationArea(
|
||||
id=area.id,
|
||||
geometry=area.geometry,
|
||||
vegetation_type=area.vegetation_type,
|
||||
density=area.density,
|
||||
min_lat=min(lats),
|
||||
max_lat=max(lats),
|
||||
min_lon=min(lons),
|
||||
max_lon=max(lons),
|
||||
)
|
||||
areas.append(area)
|
||||
self._memory_cache[cache_key] = areas
|
||||
return areas
|
||||
|
||||
# Fetch from Overpass
|
||||
# Fetch from Overpass with retry
|
||||
print(f"[Vegetation] Fetching from Overpass API...")
|
||||
|
||||
query = f"""
|
||||
@@ -143,14 +169,26 @@ class VegetationService:
|
||||
out skel qt;
|
||||
"""
|
||||
|
||||
try:
|
||||
async with httpx.AsyncClient(timeout=60.0) as client:
|
||||
response = await client.post(self.OVERPASS_URL, data={"data": query})
|
||||
response.raise_for_status()
|
||||
data = response.json()
|
||||
except Exception as e:
|
||||
print(f"[Vegetation] Fetch error: {e}")
|
||||
return []
|
||||
data = None
|
||||
max_retries = 3
|
||||
for attempt in range(max_retries):
|
||||
url = self.OVERPASS_URLS[attempt % len(self.OVERPASS_URLS)]
|
||||
try:
|
||||
timeout = 60.0 * (attempt + 1) # 60s, 120s, 180s
|
||||
async with httpx.AsyncClient(timeout=timeout) as client:
|
||||
response = await client.post(url, data={"data": query})
|
||||
response.raise_for_status()
|
||||
data = response.json()
|
||||
break
|
||||
except Exception as e:
|
||||
print(f"[Vegetation] Overpass attempt {attempt + 1}/{max_retries} failed ({url}): {e}")
|
||||
if attempt < max_retries - 1:
|
||||
wait_time = 2 ** attempt # 1s, 2s
|
||||
print(f"[Vegetation] Retrying in {wait_time}s...")
|
||||
await asyncio.sleep(wait_time)
|
||||
else:
|
||||
print(f"[Vegetation] All {max_retries} attempts failed")
|
||||
return []
|
||||
|
||||
areas = self._parse_response(data)
|
||||
|
||||
@@ -189,11 +227,19 @@ class VegetationService:
|
||||
leaf_type = tags.get("leaf_type", "mixed")
|
||||
density = "dense" if leaf_type == "needleleaved" else "mixed"
|
||||
|
||||
# Compute bounding box from geometry (lon, lat tuples)
|
||||
lons = [p[0] for p in geometry]
|
||||
lats = [p[1] for p in geometry]
|
||||
|
||||
areas.append(VegetationArea(
|
||||
id=element["id"],
|
||||
geometry=geometry,
|
||||
vegetation_type=veg_type,
|
||||
density=density
|
||||
density=density,
|
||||
min_lat=min(lats),
|
||||
max_lat=max(lats),
|
||||
min_lon=min(lons),
|
||||
max_lon=max(lons),
|
||||
))
|
||||
|
||||
return areas
|
||||
@@ -244,8 +290,12 @@ class VegetationService:
|
||||
lat: float, lon: float,
|
||||
areas: List[VegetationArea]
|
||||
) -> Optional[VegetationArea]:
|
||||
"""Check if point is in vegetation area"""
|
||||
"""Check if point is in vegetation area (with bbox pre-filter)"""
|
||||
for area in areas:
|
||||
# Quick bbox reject - skips 95%+ of polygons
|
||||
if not (area.min_lat <= lat <= area.max_lat and
|
||||
area.min_lon <= lon <= area.max_lon):
|
||||
continue
|
||||
if self._point_in_polygon(lat, lon, area.geometry):
|
||||
return area
|
||||
return None
|
||||
|
||||
@@ -6,6 +6,7 @@ or create multipath interference for RF signals.
|
||||
"""
|
||||
|
||||
import os
|
||||
import asyncio
|
||||
import httpx
|
||||
import json
|
||||
from typing import List, Tuple, Optional
|
||||
@@ -81,7 +82,10 @@ class WaterCache:
|
||||
class WaterService:
|
||||
"""OSM water bodies for reflection calculations"""
|
||||
|
||||
OVERPASS_URL = "https://overpass-api.de/api/interpreter"
|
||||
OVERPASS_URLS = [
|
||||
"https://overpass-api.de/api/interpreter",
|
||||
"https://overpass.kumi.systems/api/interpreter",
|
||||
]
|
||||
|
||||
# Reflection coefficients by water type
|
||||
REFLECTION_COEFF = {
|
||||
@@ -132,14 +136,24 @@ class WaterService:
|
||||
out skel qt;
|
||||
"""
|
||||
|
||||
try:
|
||||
async with httpx.AsyncClient(timeout=60.0) as client:
|
||||
response = await client.post(self.OVERPASS_URL, data={"data": query})
|
||||
response.raise_for_status()
|
||||
data = response.json()
|
||||
except Exception as e:
|
||||
print(f"[Water] Fetch error: {e}")
|
||||
return []
|
||||
data = None
|
||||
max_retries = 3
|
||||
for attempt in range(max_retries):
|
||||
url = self.OVERPASS_URLS[attempt % len(self.OVERPASS_URLS)]
|
||||
try:
|
||||
timeout = 60.0 * (attempt + 1)
|
||||
async with httpx.AsyncClient(timeout=timeout) as client:
|
||||
response = await client.post(url, data={"data": query})
|
||||
response.raise_for_status()
|
||||
data = response.json()
|
||||
break
|
||||
except Exception as e:
|
||||
print(f"[Water] Overpass attempt {attempt + 1}/{max_retries} failed ({url}): {e}")
|
||||
if attempt < max_retries - 1:
|
||||
await asyncio.sleep(2 ** attempt)
|
||||
else:
|
||||
print(f"[Water] All {max_retries} attempts failed")
|
||||
return []
|
||||
|
||||
bodies = self._parse_response(data)
|
||||
|
||||
|
||||
3
backend/app/utils/__init__.py
Normal file
3
backend/app/utils/__init__.py
Normal file
@@ -0,0 +1,3 @@
|
||||
"""
|
||||
Utility modules for RFCP backend.
|
||||
"""
|
||||
34
backend/app/utils/logging.py
Normal file
34
backend/app/utils/logging.py
Normal file
@@ -0,0 +1,34 @@
|
||||
"""
|
||||
Structured logging for RFCP backend.
|
||||
"""
|
||||
|
||||
import os
|
||||
import sys
|
||||
import time
|
||||
import threading
|
||||
|
||||
|
||||
_log_file = None
|
||||
|
||||
|
||||
def rfcp_log(tag: str, msg: str):
|
||||
"""Log with tag prefix, timestamp, and thread name.
|
||||
|
||||
Writes to stdout and a log file for reliability.
|
||||
"""
|
||||
global _log_file
|
||||
ts = time.strftime('%H:%M:%S')
|
||||
thr = threading.current_thread().name
|
||||
line = f"[{tag} {ts}] [{thr}] {msg}"
|
||||
print(line, flush=True)
|
||||
|
||||
try:
|
||||
if _log_file is None:
|
||||
log_dir = os.environ.get('RFCP_DATA_PATH', './data')
|
||||
os.makedirs(log_dir, exist_ok=True)
|
||||
log_path = os.path.join(log_dir, 'rfcp-backend.log')
|
||||
_log_file = open(log_path, 'a')
|
||||
_log_file.write(line + '\n')
|
||||
_log_file.flush()
|
||||
except Exception:
|
||||
pass
|
||||
44
backend/app/utils/progress.py
Normal file
44
backend/app/utils/progress.py
Normal file
@@ -0,0 +1,44 @@
|
||||
"""
|
||||
Progress reporting for long-running calculations.
|
||||
"""
|
||||
|
||||
import time
|
||||
from typing import Optional, Callable, Awaitable
|
||||
|
||||
|
||||
class ProgressTracker:
|
||||
"""Track and report calculation progress."""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
total: int,
|
||||
callback: Optional[Callable[[str, float, Optional[float]], Awaitable[None]]] = None,
|
||||
phase: str = "calculating",
|
||||
):
|
||||
self.total = total
|
||||
self.callback = callback
|
||||
self.phase = phase
|
||||
self.completed = 0
|
||||
self.start_time = time.time()
|
||||
|
||||
@property
|
||||
def progress(self) -> float:
|
||||
if self.total == 0:
|
||||
return 1.0
|
||||
return self.completed / self.total
|
||||
|
||||
@property
|
||||
def eta_seconds(self) -> Optional[float]:
|
||||
if self.completed == 0:
|
||||
return None
|
||||
elapsed = time.time() - self.start_time
|
||||
rate = self.completed / elapsed
|
||||
remaining = self.total - self.completed
|
||||
return remaining / rate if rate > 0 else None
|
||||
|
||||
def update(self, n: int = 1):
|
||||
self.completed += n
|
||||
|
||||
async def report(self):
|
||||
if self.callback:
|
||||
await self.callback(self.phase, self.progress, self.eta_seconds)
|
||||
54
backend/app/utils/units.py
Normal file
54
backend/app/utils/units.py
Normal file
@@ -0,0 +1,54 @@
|
||||
"""
|
||||
RF unit conversions.
|
||||
"""
|
||||
|
||||
import math
|
||||
|
||||
|
||||
def dbm_to_watts(dbm: float) -> float:
|
||||
"""Convert dBm to watts."""
|
||||
return 10 ** ((dbm - 30) / 10)
|
||||
|
||||
|
||||
def watts_to_dbm(watts: float) -> float:
|
||||
"""Convert watts to dBm."""
|
||||
if watts <= 0:
|
||||
return -float('inf')
|
||||
return 10 * math.log10(watts) + 30
|
||||
|
||||
|
||||
def dbm_to_mw(dbm: float) -> float:
|
||||
"""Convert dBm to milliwatts."""
|
||||
return 10 ** (dbm / 10)
|
||||
|
||||
|
||||
def mw_to_dbm(mw: float) -> float:
|
||||
"""Convert milliwatts to dBm."""
|
||||
if mw <= 0:
|
||||
return -float('inf')
|
||||
return 10 * math.log10(mw)
|
||||
|
||||
|
||||
def frequency_to_wavelength(frequency_mhz: float) -> float:
|
||||
"""Convert frequency (MHz) to wavelength (meters)."""
|
||||
return 300.0 / frequency_mhz
|
||||
|
||||
|
||||
def wavelength_to_frequency(wavelength_m: float) -> float:
|
||||
"""Convert wavelength (meters) to frequency (MHz)."""
|
||||
return 300.0 / wavelength_m
|
||||
|
||||
|
||||
def eirp_dbm(power_dbm: float, gain_dbi: float) -> float:
|
||||
"""Calculate EIRP in dBm."""
|
||||
return power_dbm + gain_dbi
|
||||
|
||||
|
||||
def eirp_watts(power_dbm: float, gain_dbi: float) -> float:
|
||||
"""Calculate EIRP in watts."""
|
||||
return dbm_to_watts(power_dbm + gain_dbi)
|
||||
|
||||
|
||||
def path_loss_to_signal_dbm(power_dbm: float, gain_dbi: float, path_loss_db: float) -> float:
|
||||
"""Calculate received signal level in dBm from EIRP and path loss."""
|
||||
return power_dbm + gain_dbi - path_loss_db
|
||||
8
backend/requirements-dev.txt
Normal file
8
backend/requirements-dev.txt
Normal file
@@ -0,0 +1,8 @@
|
||||
# Development and testing dependencies
|
||||
# Install with: pip install -r requirements-dev.txt
|
||||
|
||||
pytest>=7.0.0
|
||||
pytest-asyncio>=0.21.0
|
||||
httpx>=0.27.0
|
||||
ruff>=0.1.0
|
||||
mypy>=1.7.0
|
||||
10
backend/requirements-gpu-nvidia.txt
Normal file
10
backend/requirements-gpu-nvidia.txt
Normal file
@@ -0,0 +1,10 @@
|
||||
# NVIDIA GPU acceleration via CuPy
|
||||
# Install with: pip install -r requirements-gpu-nvidia.txt
|
||||
#
|
||||
# Choose ONE based on your CUDA version:
|
||||
# - cupy-cuda12x for CUDA 12.x (RTX 30xx, 40xx, newer)
|
||||
# - cupy-cuda11x for CUDA 11.x (older cards)
|
||||
#
|
||||
# CuPy bundles CUDA runtime (~700 MB) - no separate CUDA install needed
|
||||
|
||||
cupy-cuda12x>=13.0.0
|
||||
14
backend/requirements-gpu-opencl.txt
Normal file
14
backend/requirements-gpu-opencl.txt
Normal file
@@ -0,0 +1,14 @@
|
||||
# Intel/AMD GPU acceleration via PyOpenCL
|
||||
# Install with: pip install -r requirements-gpu-opencl.txt
|
||||
#
|
||||
# Works with:
|
||||
# - Intel UHD/Iris Graphics (integrated)
|
||||
# - AMD Radeon (discrete)
|
||||
# - NVIDIA GPUs (alternative to CUDA)
|
||||
#
|
||||
# Requires OpenCL runtime:
|
||||
# - Intel: Intel GPU Computing Runtime
|
||||
# - AMD: AMD Adrenalin driver (includes OpenCL)
|
||||
# - NVIDIA: NVIDIA driver (includes OpenCL)
|
||||
|
||||
pyopencl>=2023.1
|
||||
@@ -7,6 +7,7 @@ pymongo==4.6.1
|
||||
pydantic-settings==2.1.0
|
||||
numpy==1.26.4
|
||||
scipy==1.12.0
|
||||
shapely>=2.0.0
|
||||
requests==2.31.0
|
||||
httpx==0.27.0
|
||||
aiosqlite>=0.19.0
|
||||
|
||||
@@ -29,7 +29,23 @@ if getattr(sys, 'frozen', False):
|
||||
print(f"[RFCP] Frozen mode, base dir: {base_dir}", flush=True)
|
||||
|
||||
# Fix uvicorn TTY detection — redirect None streams to a log file
|
||||
log_path = os.path.join(base_dir, 'rfcp-server.log')
|
||||
# Use RFCP_LOG_PATH from Electron, or fallback to user-writable location
|
||||
log_dir = os.environ.get('RFCP_LOG_PATH')
|
||||
if not log_dir:
|
||||
if sys.platform == 'win32':
|
||||
appdata = os.environ.get('APPDATA', os.path.expanduser('~'))
|
||||
log_dir = os.path.join(appdata, 'rfcp-desktop', 'logs')
|
||||
else:
|
||||
log_dir = os.path.join(os.path.expanduser('~'), '.rfcp', 'logs')
|
||||
|
||||
try:
|
||||
os.makedirs(log_dir, exist_ok=True)
|
||||
log_path = os.path.join(log_dir, 'rfcp-server.log')
|
||||
except Exception:
|
||||
# Fallback to temp directory if all else fails
|
||||
import tempfile
|
||||
log_path = os.path.join(tempfile.gettempdir(), 'rfcp-server.log')
|
||||
|
||||
log_file = open(log_path, 'w')
|
||||
if sys.stdout is None:
|
||||
sys.stdout = log_file
|
||||
|
||||
0
backend/tests/__init__.py
Normal file
0
backend/tests/__init__.py
Normal file
0
backend/tests/test_geometry/__init__.py
Normal file
0
backend/tests/test_geometry/__init__.py
Normal file
60
backend/tests/test_geometry/test_diffraction.py
Normal file
60
backend/tests/test_geometry/test_diffraction.py
Normal file
@@ -0,0 +1,60 @@
|
||||
"""
|
||||
Unit tests for knife-edge diffraction calculations.
|
||||
"""
|
||||
|
||||
import sys
|
||||
import os
|
||||
|
||||
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..'))
|
||||
|
||||
from app.geometry.diffraction import knife_edge_loss
|
||||
|
||||
|
||||
def freq_to_wl(freq_mhz):
|
||||
return 300.0 / freq_mhz
|
||||
|
||||
|
||||
class TestKnifeEdgeLoss:
|
||||
def test_no_obstruction_low_loss(self):
|
||||
"""Negative h means clearance above LOS — loss should be small."""
|
||||
loss = knife_edge_loss(d1_m=500, d2_m=500, h_m=-10, wavelength_m=freq_to_wl(1800))
|
||||
assert loss >= 0
|
||||
assert loss < 3
|
||||
|
||||
def test_grazing_obstruction(self):
|
||||
"""h=0 means exactly at LOS line — ~6 dB loss."""
|
||||
loss = knife_edge_loss(d1_m=500, d2_m=500, h_m=0, wavelength_m=freq_to_wl(1800))
|
||||
assert 5 < loss < 8
|
||||
|
||||
def test_obstruction_increases_loss(self):
|
||||
wl = freq_to_wl(1800)
|
||||
loss_low = knife_edge_loss(d1_m=500, d2_m=500, h_m=1, wavelength_m=wl)
|
||||
loss_high = knife_edge_loss(d1_m=500, d2_m=500, h_m=10, wavelength_m=wl)
|
||||
assert loss_high > loss_low
|
||||
|
||||
def test_higher_freq_more_loss(self):
|
||||
"""Higher frequency = shorter wavelength = more diffraction loss."""
|
||||
loss_low_f = knife_edge_loss(d1_m=500, d2_m=500, h_m=5, wavelength_m=freq_to_wl(450))
|
||||
loss_high_f = knife_edge_loss(d1_m=500, d2_m=500, h_m=5, wavelength_m=freq_to_wl(1800))
|
||||
assert loss_high_f > loss_low_f
|
||||
|
||||
def test_zero_distance_safe(self):
|
||||
"""Should not crash on zero distances."""
|
||||
loss = knife_edge_loss(d1_m=0, d2_m=500, h_m=5, wavelength_m=freq_to_wl(900))
|
||||
assert loss >= 0
|
||||
|
||||
def test_large_clearance(self):
|
||||
"""Very deep clearance (large negative h) should have near-zero loss."""
|
||||
loss = knife_edge_loss(d1_m=500, d2_m=500, h_m=-50, wavelength_m=freq_to_wl(900))
|
||||
assert loss < 1.0
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
instance = TestKnifeEdgeLoss()
|
||||
for method_name in [m for m in dir(instance) if m.startswith("test_")]:
|
||||
try:
|
||||
getattr(instance, method_name)()
|
||||
print(f" PASS {method_name}")
|
||||
except Exception as e:
|
||||
print(f" FAIL {method_name}: {e}")
|
||||
print("\nAll tests completed.")
|
||||
74
backend/tests/test_geometry/test_haversine.py
Normal file
74
backend/tests/test_geometry/test_haversine.py
Normal file
@@ -0,0 +1,74 @@
|
||||
"""
|
||||
Unit tests for haversine distance calculations.
|
||||
"""
|
||||
|
||||
import sys
|
||||
import os
|
||||
import numpy as np
|
||||
|
||||
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..'))
|
||||
|
||||
from app.geometry.haversine import haversine_distance, haversine_batch, points_to_local_coords
|
||||
|
||||
|
||||
class TestHaversineDistance:
|
||||
def test_same_point_is_zero(self):
|
||||
d = haversine_distance(50.45, 30.52, 50.45, 30.52)
|
||||
assert abs(d) < 1.0
|
||||
|
||||
def test_known_distance(self):
|
||||
# Kyiv to Kharkiv ≈ 410 km
|
||||
d = haversine_distance(50.45, 30.52, 49.99, 36.23)
|
||||
assert 400000 < d < 420000
|
||||
|
||||
def test_short_distance(self):
|
||||
# ~111m for 0.001 degree lat
|
||||
d = haversine_distance(50.0, 30.0, 50.001, 30.0)
|
||||
assert 100 < d < 120
|
||||
|
||||
|
||||
class TestHaversineBatch:
|
||||
def test_single_point(self):
|
||||
lats = np.array([50.001])
|
||||
lons = np.array([30.0])
|
||||
distances = haversine_batch(50.0, 30.0, lats, lons)
|
||||
assert len(distances) == 1
|
||||
assert 100 < distances[0] < 120
|
||||
|
||||
def test_multiple_points(self):
|
||||
lats = np.array([50.001, 50.01, 50.1])
|
||||
lons = np.array([30.0, 30.0, 30.0])
|
||||
distances = haversine_batch(50.0, 30.0, lats, lons)
|
||||
assert len(distances) == 3
|
||||
# Should be monotonically increasing
|
||||
assert distances[0] < distances[1] < distances[2]
|
||||
|
||||
|
||||
class TestLocalCoords:
|
||||
def test_same_point_is_origin(self):
|
||||
x, y = points_to_local_coords(50.0, 30.0, np.array([50.0]), np.array([30.0]))
|
||||
assert abs(x[0]) < 1.0
|
||||
assert abs(y[0]) < 1.0
|
||||
|
||||
def test_north_is_positive_y(self):
|
||||
x, y = points_to_local_coords(50.0, 30.0, np.array([50.001]), np.array([30.0]))
|
||||
assert y[0] > 0
|
||||
assert abs(x[0]) < 1.0
|
||||
|
||||
def test_east_is_positive_x(self):
|
||||
x, y = points_to_local_coords(50.0, 30.0, np.array([50.0]), np.array([30.001]))
|
||||
assert x[0] > 0
|
||||
assert abs(y[0]) < 1.0
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
for cls in [TestHaversineDistance, TestHaversineBatch, TestLocalCoords]:
|
||||
instance = cls()
|
||||
for method_name in [m for m in dir(instance) if m.startswith("test_")]:
|
||||
try:
|
||||
getattr(instance, method_name)()
|
||||
print(f" PASS {cls.__name__}.{method_name}")
|
||||
except Exception as e:
|
||||
print(f" FAIL {cls.__name__}.{method_name}: {e}")
|
||||
|
||||
print("\nAll tests completed.")
|
||||
77
backend/tests/test_geometry/test_intersection.py
Normal file
77
backend/tests/test_geometry/test_intersection.py
Normal file
@@ -0,0 +1,77 @@
|
||||
"""
|
||||
Unit tests for line-segment intersection calculations.
|
||||
|
||||
These require NumPy, so use __main__ block with conditional import.
|
||||
"""
|
||||
|
||||
import sys
|
||||
import os
|
||||
|
||||
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..'))
|
||||
|
||||
import numpy as np
|
||||
from app.geometry.intersection import line_segments_intersect_batch
|
||||
|
||||
|
||||
class TestLineSegmentIntersect:
|
||||
def test_crossing_lines(self):
|
||||
"""Two crossing segments should intersect."""
|
||||
# Line from (0,0)→(1,1) and (0,1)→(1,0)
|
||||
result = line_segments_intersect_batch(
|
||||
p1=np.array([0.0, 0.0]),
|
||||
p2=np.array([1.0, 1.0]),
|
||||
seg_starts=np.array([[0.0, 1.0]]),
|
||||
seg_ends=np.array([[1.0, 0.0]]),
|
||||
)
|
||||
assert result[0] == True
|
||||
|
||||
def test_parallel_lines(self):
|
||||
"""Parallel lines should not intersect."""
|
||||
result = line_segments_intersect_batch(
|
||||
p1=np.array([0.0, 0.0]),
|
||||
p2=np.array([1.0, 0.0]),
|
||||
seg_starts=np.array([[0.0, 1.0]]),
|
||||
seg_ends=np.array([[1.0, 1.0]]),
|
||||
)
|
||||
assert result[0] == False
|
||||
|
||||
def test_non_crossing(self):
|
||||
"""Segments that don't reach each other."""
|
||||
result = line_segments_intersect_batch(
|
||||
p1=np.array([0.0, 0.0]),
|
||||
p2=np.array([0.5, 0.5]),
|
||||
seg_starts=np.array([[0.8, 0.0]]),
|
||||
seg_ends=np.array([[0.8, 1.0]]),
|
||||
)
|
||||
assert result[0] == False
|
||||
|
||||
def test_multiple_segments(self):
|
||||
"""Batch test with multiple segments."""
|
||||
result = line_segments_intersect_batch(
|
||||
p1=np.array([0.0, 0.0]),
|
||||
p2=np.array([1.0, 1.0]),
|
||||
seg_starts=np.array([
|
||||
[0.0, 1.0], # crosses
|
||||
[2.0, 0.0], # doesn't cross
|
||||
[0.5, 0.0], # crosses
|
||||
]),
|
||||
seg_ends=np.array([
|
||||
[1.0, 0.0], # crosses
|
||||
[2.0, 1.0], # doesn't cross
|
||||
[0.5, 1.0], # crosses
|
||||
]),
|
||||
)
|
||||
assert result[0] == True
|
||||
assert result[1] == False
|
||||
assert result[2] == True
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
instance = TestLineSegmentIntersect()
|
||||
for method_name in [m for m in dir(instance) if m.startswith("test_")]:
|
||||
try:
|
||||
getattr(instance, method_name)()
|
||||
print(f" PASS {method_name}")
|
||||
except Exception as e:
|
||||
print(f" FAIL {method_name}: {e}")
|
||||
print("\nAll tests completed.")
|
||||
103
backend/tests/test_geometry/test_los.py
Normal file
103
backend/tests/test_geometry/test_los.py
Normal file
@@ -0,0 +1,103 @@
|
||||
"""
|
||||
Unit tests for line-of-sight and Fresnel zone calculations.
|
||||
"""
|
||||
|
||||
import sys
|
||||
import os
|
||||
import math
|
||||
|
||||
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..'))
|
||||
|
||||
from app.geometry.los import fresnel_radius, check_los_terrain
|
||||
|
||||
|
||||
def freq_to_wavelength(freq_mhz):
|
||||
return 300.0 / freq_mhz
|
||||
|
||||
|
||||
class TestFresnelRadius:
|
||||
def test_positive_result(self):
|
||||
r = fresnel_radius(500, 500, freq_to_wavelength(1800))
|
||||
assert r > 0
|
||||
|
||||
def test_symmetric(self):
|
||||
wl = freq_to_wavelength(900)
|
||||
r1 = fresnel_radius(300, 700, wl)
|
||||
r2 = fresnel_radius(700, 300, wl)
|
||||
assert abs(r1 - r2) < 0.001
|
||||
|
||||
def test_lower_freq_larger_radius(self):
|
||||
r_high = fresnel_radius(500, 500, freq_to_wavelength(1800))
|
||||
r_low = fresnel_radius(500, 500, freq_to_wavelength(900))
|
||||
assert r_low > r_high
|
||||
|
||||
def test_center_is_maximum(self):
|
||||
"""Fresnel radius is largest at the midpoint of the path."""
|
||||
wl = freq_to_wavelength(900)
|
||||
r_center = fresnel_radius(500, 500, wl)
|
||||
r_offset = fresnel_radius(200, 800, wl)
|
||||
assert r_center > r_offset
|
||||
|
||||
def test_known_value(self):
|
||||
"""First Fresnel zone radius at midpoint of 1km path at 1GHz ~ 8.66m."""
|
||||
# F1 = sqrt(lambda * d1 * d2 / (d1+d2))
|
||||
# lambda = 0.3m at 1000MHz, d1=d2=500m
|
||||
# F1 = sqrt(0.3 * 500 * 500 / 1000) = sqrt(75) ~ 8.66m
|
||||
r = fresnel_radius(500, 500, freq_to_wavelength(1000))
|
||||
assert 8.0 < r < 9.5
|
||||
|
||||
def test_zero_distance(self):
|
||||
r = fresnel_radius(0, 500, freq_to_wavelength(900))
|
||||
assert r == 0.0
|
||||
|
||||
|
||||
class TestCheckLosTerrain:
|
||||
def test_flat_terrain_has_los(self):
|
||||
profile = [
|
||||
{"elevation": 100, "distance": 0},
|
||||
{"elevation": 100, "distance": 250},
|
||||
{"elevation": 100, "distance": 500},
|
||||
{"elevation": 100, "distance": 750},
|
||||
{"elevation": 100, "distance": 1000},
|
||||
]
|
||||
result = check_los_terrain(profile, tx_height=30, rx_height=1.5)
|
||||
assert result["has_los"] is True
|
||||
assert result["clearance"] > 0
|
||||
|
||||
def test_hill_blocks_los(self):
|
||||
profile = [
|
||||
{"elevation": 100, "distance": 0},
|
||||
{"elevation": 100, "distance": 250},
|
||||
{"elevation": 200, "distance": 500}, # 100m hill
|
||||
{"elevation": 100, "distance": 750},
|
||||
{"elevation": 100, "distance": 1000},
|
||||
]
|
||||
result = check_los_terrain(profile, tx_height=10, rx_height=1.5)
|
||||
assert result["has_los"] is False
|
||||
assert result["blocked_at"] is not None
|
||||
|
||||
def test_empty_profile(self):
|
||||
result = check_los_terrain([], tx_height=30, rx_height=1.5)
|
||||
assert result["has_los"] is True
|
||||
|
||||
def test_high_antenna_clears_hill(self):
|
||||
profile = [
|
||||
{"elevation": 100, "distance": 0},
|
||||
{"elevation": 110, "distance": 500},
|
||||
{"elevation": 100, "distance": 1000},
|
||||
]
|
||||
# TX at 150m (100+50), RX at 101.5m. LOS at 500m ≈ 125.75m, terrain=110m → clear
|
||||
result = check_los_terrain(profile, tx_height=50, rx_height=1.5)
|
||||
assert result["has_los"] is True
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
for cls in [TestFresnelRadius, TestCheckLosTerrain]:
|
||||
instance = cls()
|
||||
for method_name in [m for m in dir(instance) if m.startswith("test_")]:
|
||||
try:
|
||||
getattr(instance, method_name)()
|
||||
print(f" PASS {cls.__name__}.{method_name}")
|
||||
except Exception as e:
|
||||
print(f" FAIL {cls.__name__}.{method_name}: {e}")
|
||||
print("\nAll tests completed.")
|
||||
1
backend/tests/test_integration/__init__.py
Normal file
1
backend/tests/test_integration/__init__.py
Normal file
@@ -0,0 +1 @@
|
||||
|
||||
127
backend/tests/test_integration/test_calculator.py
Normal file
127
backend/tests/test_integration/test_calculator.py
Normal file
@@ -0,0 +1,127 @@
|
||||
"""
|
||||
Integration tests for the PointCalculator.
|
||||
|
||||
Verifies end-to-end point calculation with various
|
||||
propagation models and environmental conditions.
|
||||
"""
|
||||
|
||||
import sys
|
||||
import os
|
||||
|
||||
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..'))
|
||||
|
||||
from app.core.calculator import PointCalculator
|
||||
from app.propagation.free_space import FreeSpaceModel
|
||||
from app.propagation.okumura_hata import OkumuraHataModel
|
||||
from app.propagation.cost231_hata import Cost231HataModel
|
||||
|
||||
|
||||
class TestPointCalculatorFSPL:
|
||||
def test_basic_calculation(self):
|
||||
calc = PointCalculator(FreeSpaceModel())
|
||||
result = calc.calculate_point(
|
||||
site_lat=50.0, site_lon=30.0, site_height=30,
|
||||
site_power=43, site_gain=18, site_frequency=1800,
|
||||
point_lat=50.001, point_lon=30.0,
|
||||
distance=111,
|
||||
)
|
||||
assert result.rsrp > -50 # Strong signal at short range
|
||||
assert result.has_los is True
|
||||
assert result.model_used == "Free-Space"
|
||||
assert result.path_loss > 0
|
||||
assert result.terrain_loss == 0
|
||||
assert result.building_loss == 0
|
||||
|
||||
def test_signal_decreases_with_distance(self):
|
||||
calc = PointCalculator(FreeSpaceModel())
|
||||
near = calc.calculate_point(
|
||||
site_lat=50.0, site_lon=30.0, site_height=30,
|
||||
site_power=43, site_gain=18, site_frequency=1800,
|
||||
point_lat=50.001, point_lon=30.0, distance=100,
|
||||
)
|
||||
far = calc.calculate_point(
|
||||
site_lat=50.0, site_lon=30.0, site_height=30,
|
||||
site_power=43, site_gain=18, site_frequency=1800,
|
||||
point_lat=50.01, point_lon=30.0, distance=1000,
|
||||
)
|
||||
assert near.rsrp > far.rsrp
|
||||
|
||||
def test_terrain_obstruction(self):
|
||||
calc = PointCalculator(FreeSpaceModel())
|
||||
los = calc.calculate_point(
|
||||
site_lat=50.0, site_lon=30.0, site_height=30,
|
||||
site_power=43, site_gain=18, site_frequency=1800,
|
||||
point_lat=50.01, point_lon=30.0, distance=1000,
|
||||
)
|
||||
nlos = calc.calculate_point(
|
||||
site_lat=50.0, site_lon=30.0, site_height=30,
|
||||
site_power=43, site_gain=18, site_frequency=1800,
|
||||
point_lat=50.01, point_lon=30.0, distance=1000,
|
||||
terrain_clearance=-10,
|
||||
)
|
||||
assert nlos.rsrp < los.rsrp
|
||||
assert nlos.has_los is False
|
||||
assert nlos.terrain_loss > 0
|
||||
|
||||
def test_building_loss_applied(self):
|
||||
calc = PointCalculator(FreeSpaceModel())
|
||||
no_building = calc.calculate_point(
|
||||
site_lat=50.0, site_lon=30.0, site_height=30,
|
||||
site_power=43, site_gain=18, site_frequency=1800,
|
||||
point_lat=50.01, point_lon=30.0, distance=1000,
|
||||
)
|
||||
with_building = calc.calculate_point(
|
||||
site_lat=50.0, site_lon=30.0, site_height=30,
|
||||
site_power=43, site_gain=18, site_frequency=1800,
|
||||
point_lat=50.01, point_lon=30.0, distance=1000,
|
||||
building_loss=20,
|
||||
)
|
||||
assert abs(no_building.rsrp - with_building.rsrp - 20) < 0.1
|
||||
|
||||
|
||||
class TestPointCalculatorAntenna:
|
||||
def test_off_axis_reduces_signal(self):
|
||||
calc = PointCalculator(FreeSpaceModel())
|
||||
omni = calc.calculate_point(
|
||||
site_lat=50.0, site_lon=30.0, site_height=30,
|
||||
site_power=43, site_gain=18, site_frequency=1800,
|
||||
point_lat=50.001, point_lon=30.0, distance=111,
|
||||
)
|
||||
directional = calc.calculate_point(
|
||||
site_lat=50.0, site_lon=30.0, site_height=30,
|
||||
site_power=43, site_gain=18, site_frequency=1800,
|
||||
point_lat=50.001, point_lon=30.0, distance=111,
|
||||
azimuth=90, beamwidth=65, # Pointing East, point is North
|
||||
)
|
||||
assert directional.rsrp < omni.rsrp
|
||||
|
||||
|
||||
class TestPointCalculatorModelFallback:
|
||||
def test_out_of_range_uses_fspl(self):
|
||||
"""When Okumura-Hata is out of valid range, should fall back to FSPL."""
|
||||
calc = PointCalculator(OkumuraHataModel())
|
||||
# 50m distance is below Okumura-Hata minimum (1km)
|
||||
result = calc.calculate_point(
|
||||
site_lat=50.0, site_lon=30.0, site_height=30,
|
||||
site_power=43, site_gain=18, site_frequency=900,
|
||||
point_lat=50.0, point_lon=30.0001, distance=50,
|
||||
)
|
||||
# Should still return a valid result (via FSPL fallback)
|
||||
assert result.rsrp != 0
|
||||
assert result.path_loss > 0
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
for cls_name, cls in [
|
||||
("FSPL", TestPointCalculatorFSPL),
|
||||
("Antenna", TestPointCalculatorAntenna),
|
||||
("Fallback", TestPointCalculatorModelFallback),
|
||||
]:
|
||||
instance = cls()
|
||||
for method_name in [m for m in dir(instance) if m.startswith("test_")]:
|
||||
try:
|
||||
getattr(instance, method_name)()
|
||||
print(f" PASS {cls_name}.{method_name}")
|
||||
except Exception as e:
|
||||
print(f" FAIL {cls_name}.{method_name}: {e}")
|
||||
print("\nAll tests completed.")
|
||||
115
backend/tests/test_integration/test_engine.py
Normal file
115
backend/tests/test_integration/test_engine.py
Normal file
@@ -0,0 +1,115 @@
|
||||
"""
|
||||
Integration tests for the CoverageEngine orchestrator.
|
||||
|
||||
Tests model selection, available models API, and the
|
||||
engine's coordination logic (without running actual
|
||||
coverage calculations, which require terrain data).
|
||||
"""
|
||||
|
||||
import sys
|
||||
import os
|
||||
|
||||
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..'))
|
||||
|
||||
from app.core.engine import CoverageEngine, BandType, PresetType, CoverageSettings
|
||||
|
||||
|
||||
class TestEngineModelSelection:
|
||||
def test_lte_urban_uses_cost231(self):
|
||||
engine = CoverageEngine()
|
||||
model = engine.select_model(BandType.LTE, "urban")
|
||||
assert model.name == "COST-231-Hata"
|
||||
|
||||
def test_lte_suburban_uses_okumura(self):
|
||||
engine = CoverageEngine()
|
||||
model = engine.select_model(BandType.LTE, "suburban")
|
||||
assert model.name == "Okumura-Hata"
|
||||
|
||||
def test_lte_open_uses_fspl(self):
|
||||
engine = CoverageEngine()
|
||||
model = engine.select_model(BandType.LTE, "open")
|
||||
assert model.name == "Free-Space"
|
||||
|
||||
def test_uhf_urban_uses_okumura(self):
|
||||
engine = CoverageEngine()
|
||||
model = engine.select_model(BandType.UHF, "urban")
|
||||
assert model.name == "Okumura-Hata"
|
||||
|
||||
def test_uhf_rural_uses_longley_rice(self):
|
||||
engine = CoverageEngine()
|
||||
model = engine.select_model(BandType.UHF, "rural")
|
||||
assert model.name == "Longley-Rice"
|
||||
|
||||
def test_vhf_urban_uses_p1546(self):
|
||||
engine = CoverageEngine()
|
||||
model = engine.select_model(BandType.VHF, "urban")
|
||||
assert model.name == "ITU-R-P.1546"
|
||||
|
||||
def test_vhf_rural_uses_longley_rice(self):
|
||||
engine = CoverageEngine()
|
||||
model = engine.select_model(BandType.VHF, "rural")
|
||||
assert model.name == "Longley-Rice"
|
||||
|
||||
def test_unknown_band_falls_back(self):
|
||||
engine = CoverageEngine()
|
||||
model = engine.select_model(BandType.CUSTOM, "desert")
|
||||
assert model is not None # Should not crash
|
||||
|
||||
|
||||
class TestEngineModelsAPI:
|
||||
def test_returns_dict(self):
|
||||
engine = CoverageEngine()
|
||||
models = engine.get_available_models()
|
||||
assert isinstance(models, dict)
|
||||
assert len(models) >= 5
|
||||
|
||||
def test_model_info_structure(self):
|
||||
engine = CoverageEngine()
|
||||
models = engine.get_available_models()
|
||||
for name, info in models.items():
|
||||
assert "frequency_range" in info
|
||||
assert "distance_range" in info
|
||||
assert "bands" in info
|
||||
assert len(info["bands"]) > 0
|
||||
|
||||
def test_all_expected_models_present(self):
|
||||
engine = CoverageEngine()
|
||||
models = engine.get_available_models()
|
||||
expected = {"COST-231-Hata", "Okumura-Hata", "Free-Space", "Longley-Rice", "ITU-R-P.1546"}
|
||||
assert expected.issubset(set(models.keys()))
|
||||
|
||||
|
||||
class TestCoverageSettings:
|
||||
def test_default_settings(self):
|
||||
s = CoverageSettings()
|
||||
assert s.radius == 10000
|
||||
assert s.resolution == 200
|
||||
assert s.preset == PresetType.STANDARD
|
||||
assert s.band_type == BandType.LTE
|
||||
|
||||
def test_preset_values(self):
|
||||
assert PresetType.FAST.value == "fast"
|
||||
assert PresetType.STANDARD.value == "standard"
|
||||
assert PresetType.DETAILED.value == "detailed"
|
||||
assert PresetType.FULL.value == "full"
|
||||
|
||||
def test_band_type_values(self):
|
||||
assert BandType.LTE.value == "lte"
|
||||
assert BandType.UHF.value == "uhf"
|
||||
assert BandType.VHF.value == "vhf"
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
for cls_name, cls in [
|
||||
("ModelSelection", TestEngineModelSelection),
|
||||
("ModelsAPI", TestEngineModelsAPI),
|
||||
("CoverageSettings", TestCoverageSettings),
|
||||
]:
|
||||
instance = cls()
|
||||
for method_name in [m for m in dir(instance) if m.startswith("test_")]:
|
||||
try:
|
||||
getattr(instance, method_name)()
|
||||
print(f" PASS {cls_name}.{method_name}")
|
||||
except Exception as e:
|
||||
print(f" FAIL {cls_name}.{method_name}: {e}")
|
||||
print("\nAll tests completed.")
|
||||
0
backend/tests/test_models/__init__.py
Normal file
0
backend/tests/test_models/__init__.py
Normal file
90
backend/tests/test_models/test_cost231.py
Normal file
90
backend/tests/test_models/test_cost231.py
Normal file
@@ -0,0 +1,90 @@
|
||||
"""
|
||||
Unit tests for COST-231 Hata and COST-231 Walfisch-Ikegami models.
|
||||
"""
|
||||
|
||||
import sys
|
||||
import os
|
||||
|
||||
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..'))
|
||||
|
||||
from app.propagation.base import PropagationInput
|
||||
from app.propagation.cost231_hata import Cost231HataModel
|
||||
from app.propagation.cost231_wi import Cost231WIModel
|
||||
|
||||
|
||||
def make_input(**kwargs) -> PropagationInput:
|
||||
defaults = {
|
||||
"frequency_mhz": 1800,
|
||||
"distance_m": 5000,
|
||||
"tx_height_m": 30,
|
||||
"rx_height_m": 1.5,
|
||||
"environment": "urban",
|
||||
}
|
||||
defaults.update(kwargs)
|
||||
return PropagationInput(**defaults)
|
||||
|
||||
|
||||
class TestCost231Hata:
|
||||
def test_typical_range(self):
|
||||
model = Cost231HataModel()
|
||||
out = model.calculate(make_input())
|
||||
assert 130 < out.path_loss_db < 170
|
||||
|
||||
def test_model_name(self):
|
||||
model = Cost231HataModel()
|
||||
assert model.name == "COST-231-Hata"
|
||||
|
||||
def test_frequency_range(self):
|
||||
model = Cost231HataModel()
|
||||
assert model.is_valid_for(make_input(frequency_mhz=1500))
|
||||
assert model.is_valid_for(make_input(frequency_mhz=2000))
|
||||
assert not model.is_valid_for(make_input(frequency_mhz=900))
|
||||
|
||||
def test_distance_increases_loss(self):
|
||||
model = Cost231HataModel()
|
||||
loss_2 = model.calculate(make_input(distance_m=2000)).path_loss_db
|
||||
loss_10 = model.calculate(make_input(distance_m=10000)).path_loss_db
|
||||
assert loss_10 > loss_2
|
||||
|
||||
def test_urban_vs_suburban(self):
|
||||
model = Cost231HataModel()
|
||||
urban = model.calculate(make_input(environment="urban")).path_loss_db
|
||||
suburban = model.calculate(make_input(environment="suburban")).path_loss_db
|
||||
assert suburban < urban
|
||||
|
||||
|
||||
class TestCost231WI:
|
||||
def test_typical_range(self):
|
||||
model = Cost231WIModel()
|
||||
out = model.calculate(make_input(distance_m=500))
|
||||
assert 80 < out.path_loss_db < 160
|
||||
|
||||
def test_model_name(self):
|
||||
model = Cost231WIModel()
|
||||
assert model.name == "COST-231-WI"
|
||||
|
||||
def test_distance_increases_loss(self):
|
||||
model = Cost231WIModel()
|
||||
loss_200 = model.calculate(make_input(distance_m=200)).path_loss_db
|
||||
loss_1000 = model.calculate(make_input(distance_m=1000)).path_loss_db
|
||||
assert loss_1000 > loss_200
|
||||
|
||||
def test_frequency_range(self):
|
||||
model = Cost231WIModel()
|
||||
assert model.is_valid_for(make_input(frequency_mhz=800))
|
||||
assert model.is_valid_for(make_input(frequency_mhz=2000))
|
||||
assert not model.is_valid_for(make_input(frequency_mhz=400))
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
for cls_name, cls in [("COST231Hata", TestCost231Hata), ("COST231WI", TestCost231WI)]:
|
||||
instance = cls()
|
||||
for method_name in [m for m in dir(instance) if m.startswith("test_")]:
|
||||
try:
|
||||
getattr(instance, method_name)()
|
||||
print(f" PASS {cls_name}.{method_name}")
|
||||
except AssertionError as e:
|
||||
print(f" FAIL {cls_name}.{method_name}: {e}")
|
||||
except Exception as e:
|
||||
print(f" ERROR {cls_name}.{method_name}: {e}")
|
||||
print("\nAll tests completed.")
|
||||
82
backend/tests/test_models/test_free_space.py
Normal file
82
backend/tests/test_models/test_free_space.py
Normal file
@@ -0,0 +1,82 @@
|
||||
"""
|
||||
Detailed unit tests for the Free Space Path Loss model.
|
||||
"""
|
||||
|
||||
import sys
|
||||
import os
|
||||
import math
|
||||
|
||||
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..'))
|
||||
|
||||
from app.propagation.base import PropagationInput
|
||||
from app.propagation.free_space import FreeSpaceModel
|
||||
|
||||
|
||||
def make_input(**kwargs) -> PropagationInput:
|
||||
defaults = {
|
||||
"frequency_mhz": 1800,
|
||||
"distance_m": 1000,
|
||||
"tx_height_m": 30,
|
||||
"rx_height_m": 1.5,
|
||||
"environment": "urban",
|
||||
}
|
||||
defaults.update(kwargs)
|
||||
return PropagationInput(**defaults)
|
||||
|
||||
|
||||
class TestFreeSpaceModel:
|
||||
def test_formula_accuracy(self):
|
||||
"""FSPL = 20*log10(d_km) + 20*log10(f_MHz) + 32.45"""
|
||||
model = FreeSpaceModel()
|
||||
# At 1km, 1000MHz: 20*0 + 20*60 + 32.45 = 92.45 dB
|
||||
out = model.calculate(make_input(distance_m=1000, frequency_mhz=1000))
|
||||
expected = 32.45 + 20 * math.log10(1.0) + 20 * math.log10(1000)
|
||||
assert abs(out.path_loss_db - expected) < 0.1
|
||||
|
||||
def test_6db_per_distance_doubling(self):
|
||||
model = FreeSpaceModel()
|
||||
loss_1 = model.calculate(make_input(distance_m=1000)).path_loss_db
|
||||
loss_2 = model.calculate(make_input(distance_m=2000)).path_loss_db
|
||||
assert abs((loss_2 - loss_1) - 6.02) < 0.1
|
||||
|
||||
def test_6db_per_frequency_doubling(self):
|
||||
model = FreeSpaceModel()
|
||||
loss_1 = model.calculate(make_input(frequency_mhz=900)).path_loss_db
|
||||
loss_2 = model.calculate(make_input(frequency_mhz=1800)).path_loss_db
|
||||
assert abs((loss_2 - loss_1) - 6.02) < 0.1
|
||||
|
||||
def test_always_los(self):
|
||||
model = FreeSpaceModel()
|
||||
out = model.calculate(make_input())
|
||||
assert out.is_los is True
|
||||
|
||||
def test_model_name(self):
|
||||
model = FreeSpaceModel()
|
||||
assert model.name == "Free-Space"
|
||||
|
||||
def test_wide_frequency_range(self):
|
||||
model = FreeSpaceModel()
|
||||
assert model.is_valid_for(make_input(frequency_mhz=1))
|
||||
assert model.is_valid_for(make_input(frequency_mhz=100000))
|
||||
|
||||
def test_very_short_distance(self):
|
||||
model = FreeSpaceModel()
|
||||
out = model.calculate(make_input(distance_m=10))
|
||||
assert out.path_loss_db > 0
|
||||
assert out.path_loss_db < 80
|
||||
|
||||
def test_very_long_distance(self):
|
||||
model = FreeSpaceModel()
|
||||
out = model.calculate(make_input(distance_m=100000))
|
||||
assert out.path_loss_db > 120
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
instance = TestFreeSpaceModel()
|
||||
for method_name in [m for m in dir(instance) if m.startswith("test_")]:
|
||||
try:
|
||||
getattr(instance, method_name)()
|
||||
print(f" PASS {method_name}")
|
||||
except Exception as e:
|
||||
print(f" FAIL {method_name}: {e}")
|
||||
print("\nAll tests completed.")
|
||||
93
backend/tests/test_models/test_okumura_hata.py
Normal file
93
backend/tests/test_models/test_okumura_hata.py
Normal file
@@ -0,0 +1,93 @@
|
||||
"""
|
||||
Detailed unit tests for the Okumura-Hata model.
|
||||
"""
|
||||
|
||||
import sys
|
||||
import os
|
||||
|
||||
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..'))
|
||||
|
||||
from app.propagation.base import PropagationInput
|
||||
from app.propagation.okumura_hata import OkumuraHataModel
|
||||
|
||||
|
||||
def make_input(**kwargs) -> PropagationInput:
|
||||
defaults = {
|
||||
"frequency_mhz": 900,
|
||||
"distance_m": 5000,
|
||||
"tx_height_m": 30,
|
||||
"rx_height_m": 1.5,
|
||||
"environment": "urban",
|
||||
}
|
||||
defaults.update(kwargs)
|
||||
return PropagationInput(**defaults)
|
||||
|
||||
|
||||
class TestOkumuraHata:
|
||||
def test_urban_typical_range(self):
|
||||
model = OkumuraHataModel()
|
||||
out = model.calculate(make_input())
|
||||
# 900MHz, 5km, urban: expect ~130-155 dB
|
||||
assert 120 < out.path_loss_db < 160
|
||||
|
||||
def test_environment_ordering(self):
|
||||
"""Urban > suburban > rural path loss."""
|
||||
model = OkumuraHataModel()
|
||||
urban = model.calculate(make_input(environment="urban")).path_loss_db
|
||||
suburban = model.calculate(make_input(environment="suburban")).path_loss_db
|
||||
rural = model.calculate(make_input(environment="rural")).path_loss_db
|
||||
assert urban > suburban > rural
|
||||
|
||||
def test_distance_increases_loss(self):
|
||||
model = OkumuraHataModel()
|
||||
loss_1 = model.calculate(make_input(distance_m=2000)).path_loss_db
|
||||
loss_5 = model.calculate(make_input(distance_m=5000)).path_loss_db
|
||||
loss_10 = model.calculate(make_input(distance_m=10000)).path_loss_db
|
||||
assert loss_1 < loss_5 < loss_10
|
||||
|
||||
def test_frequency_increases_loss(self):
|
||||
model = OkumuraHataModel()
|
||||
loss_450 = model.calculate(make_input(frequency_mhz=450)).path_loss_db
|
||||
loss_900 = model.calculate(make_input(frequency_mhz=900)).path_loss_db
|
||||
assert loss_900 > loss_450
|
||||
|
||||
def test_higher_tx_reduces_loss(self):
|
||||
model = OkumuraHataModel()
|
||||
loss_low = model.calculate(make_input(tx_height_m=10)).path_loss_db
|
||||
loss_high = model.calculate(make_input(tx_height_m=50)).path_loss_db
|
||||
assert loss_high < loss_low
|
||||
|
||||
def test_valid_frequency_range(self):
|
||||
model = OkumuraHataModel()
|
||||
assert model.is_valid_for(make_input(frequency_mhz=150))
|
||||
assert model.is_valid_for(make_input(frequency_mhz=1500))
|
||||
assert not model.is_valid_for(make_input(frequency_mhz=2000))
|
||||
|
||||
def test_valid_distance_range(self):
|
||||
model = OkumuraHataModel()
|
||||
assert model.is_valid_for(make_input(distance_m=500))
|
||||
assert model.is_valid_for(make_input(distance_m=20000))
|
||||
# Out of range
|
||||
assert not model.is_valid_for(make_input(distance_m=50))
|
||||
|
||||
def test_model_name(self):
|
||||
model = OkumuraHataModel()
|
||||
assert model.name == "Okumura-Hata"
|
||||
|
||||
def test_open_environment(self):
|
||||
"""Open environment should have even less loss than rural."""
|
||||
model = OkumuraHataModel()
|
||||
rural = model.calculate(make_input(environment="rural")).path_loss_db
|
||||
open_area = model.calculate(make_input(environment="open")).path_loss_db
|
||||
assert open_area < rural
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
instance = TestOkumuraHata()
|
||||
for method_name in [m for m in dir(instance) if m.startswith("test_")]:
|
||||
try:
|
||||
getattr(instance, method_name)()
|
||||
print(f" PASS {method_name}")
|
||||
except Exception as e:
|
||||
print(f" FAIL {method_name}: {e}")
|
||||
print("\nAll tests completed.")
|
||||
198
backend/tests/test_models/test_propagation.py
Normal file
198
backend/tests/test_models/test_propagation.py
Normal file
@@ -0,0 +1,198 @@
|
||||
"""
|
||||
Unit tests for propagation models.
|
||||
|
||||
Run: cd backend && python -m pytest tests/test_models/test_propagation.py -v
|
||||
"""
|
||||
|
||||
import math
|
||||
import sys
|
||||
import os
|
||||
|
||||
# Add backend to path
|
||||
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..'))
|
||||
|
||||
from app.propagation.base import PropagationInput
|
||||
from app.propagation.free_space import FreeSpaceModel
|
||||
from app.propagation.okumura_hata import OkumuraHataModel
|
||||
from app.propagation.cost231_hata import Cost231HataModel
|
||||
from app.propagation.cost231_wi import Cost231WIModel
|
||||
from app.propagation.itu_r_p1546 import ITUR_P1546Model
|
||||
from app.propagation.longley_rice import LongleyRiceModel
|
||||
from app.propagation.itu_r_p526 import KnifeEdgeDiffractionModel
|
||||
|
||||
|
||||
def make_input(**kwargs) -> PropagationInput:
|
||||
defaults = {
|
||||
"frequency_mhz": 1800,
|
||||
"distance_m": 1000,
|
||||
"tx_height_m": 30,
|
||||
"rx_height_m": 1.5,
|
||||
"environment": "urban",
|
||||
}
|
||||
defaults.update(kwargs)
|
||||
return PropagationInput(**defaults)
|
||||
|
||||
|
||||
class TestFreeSpaceModel:
|
||||
def test_basic_fspl(self):
|
||||
model = FreeSpaceModel()
|
||||
output = model.calculate(make_input(distance_m=1000, frequency_mhz=1800))
|
||||
# FSPL at 1km, 1800MHz ≈ 97.5 dB
|
||||
assert 95 < output.path_loss_db < 100
|
||||
assert output.is_los is True
|
||||
assert output.model_name == "Free-Space"
|
||||
|
||||
def test_distance_increases_loss(self):
|
||||
model = FreeSpaceModel()
|
||||
loss_1km = model.calculate(make_input(distance_m=1000)).path_loss_db
|
||||
loss_2km = model.calculate(make_input(distance_m=2000)).path_loss_db
|
||||
# Doubling distance adds ~6 dB
|
||||
assert 5 < (loss_2km - loss_1km) < 7
|
||||
|
||||
def test_frequency_increases_loss(self):
|
||||
model = FreeSpaceModel()
|
||||
loss_900 = model.calculate(make_input(frequency_mhz=900)).path_loss_db
|
||||
loss_1800 = model.calculate(make_input(frequency_mhz=1800)).path_loss_db
|
||||
# Doubling frequency adds ~6 dB
|
||||
assert 5 < (loss_1800 - loss_900) < 7
|
||||
|
||||
def test_valid_range(self):
|
||||
model = FreeSpaceModel()
|
||||
assert model.is_valid_for(make_input(distance_m=100))
|
||||
assert model.is_valid_for(make_input(distance_m=100000))
|
||||
|
||||
|
||||
class TestOkumuraHata:
|
||||
def test_urban_loss(self):
|
||||
model = OkumuraHataModel()
|
||||
output = model.calculate(make_input(
|
||||
frequency_mhz=900, distance_m=5000,
|
||||
tx_height_m=30, environment="urban",
|
||||
))
|
||||
# Typical urban loss at 5km, 900MHz: 130-150 dB
|
||||
assert 120 < output.path_loss_db < 160
|
||||
assert output.model_name == "Okumura-Hata"
|
||||
|
||||
def test_suburban_less_than_urban(self):
|
||||
model = OkumuraHataModel()
|
||||
inp = make_input(frequency_mhz=900, distance_m=5000)
|
||||
urban = model.calculate(PropagationInput(**{**inp.__dict__, "environment": "urban"}))
|
||||
suburban = model.calculate(PropagationInput(**{**inp.__dict__, "environment": "suburban"}))
|
||||
assert suburban.path_loss_db < urban.path_loss_db
|
||||
|
||||
def test_rural_less_than_suburban(self):
|
||||
model = OkumuraHataModel()
|
||||
inp = make_input(frequency_mhz=900, distance_m=5000)
|
||||
suburban = model.calculate(PropagationInput(**{**inp.__dict__, "environment": "suburban"}))
|
||||
rural = model.calculate(PropagationInput(**{**inp.__dict__, "environment": "rural"}))
|
||||
assert rural.path_loss_db < suburban.path_loss_db
|
||||
|
||||
def test_valid_range(self):
|
||||
model = OkumuraHataModel()
|
||||
assert model.is_valid_for(make_input(frequency_mhz=900, distance_m=5000))
|
||||
assert not model.is_valid_for(make_input(frequency_mhz=2000, distance_m=5000))
|
||||
|
||||
|
||||
class TestCost231Hata:
|
||||
def test_basic_loss(self):
|
||||
model = Cost231HataModel()
|
||||
output = model.calculate(make_input(
|
||||
frequency_mhz=1800, distance_m=5000,
|
||||
))
|
||||
assert 130 < output.path_loss_db < 170
|
||||
assert output.model_name == "COST-231-Hata"
|
||||
|
||||
def test_valid_range(self):
|
||||
model = Cost231HataModel()
|
||||
assert model.is_valid_for(make_input(frequency_mhz=1800, distance_m=5000))
|
||||
assert not model.is_valid_for(make_input(frequency_mhz=900, distance_m=5000))
|
||||
|
||||
|
||||
class TestCost231WI:
|
||||
def test_basic_loss(self):
|
||||
model = Cost231WIModel()
|
||||
output = model.calculate(make_input(
|
||||
frequency_mhz=1800, distance_m=500,
|
||||
environment="urban",
|
||||
))
|
||||
assert 80 < output.path_loss_db < 160
|
||||
assert output.model_name == "COST-231-WI"
|
||||
|
||||
|
||||
class TestITUR_P1546:
|
||||
def test_basic_loss(self):
|
||||
model = ITUR_P1546Model()
|
||||
output = model.calculate(make_input(
|
||||
frequency_mhz=450, distance_m=10000,
|
||||
))
|
||||
assert 80 < output.path_loss_db < 160
|
||||
assert output.model_name == "ITU-R-P.1546"
|
||||
|
||||
|
||||
class TestLongleyRice:
|
||||
def test_basic_loss(self):
|
||||
model = LongleyRiceModel()
|
||||
output = model.calculate(make_input(
|
||||
frequency_mhz=150, distance_m=20000,
|
||||
terrain_roughness_m=50,
|
||||
))
|
||||
assert 90 < output.path_loss_db < 160
|
||||
assert output.model_name == "Longley-Rice"
|
||||
|
||||
def test_flat_terrain_is_los(self):
|
||||
model = LongleyRiceModel()
|
||||
output = model.calculate(make_input(
|
||||
frequency_mhz=150, distance_m=5000,
|
||||
terrain_roughness_m=5,
|
||||
))
|
||||
assert output.is_los is True
|
||||
|
||||
|
||||
class TestKnifeEdgeDiffraction:
|
||||
def test_no_obstruction(self):
|
||||
loss = KnifeEdgeDiffractionModel.calculate_loss(
|
||||
d1_m=500, d2_m=500, h_m=-5, wavelength_m=0.167,
|
||||
)
|
||||
assert loss >= 0
|
||||
|
||||
def test_obstruction_increases_loss(self):
|
||||
loss_low = KnifeEdgeDiffractionModel.calculate_loss(
|
||||
d1_m=500, d2_m=500, h_m=1, wavelength_m=0.167,
|
||||
)
|
||||
loss_high = KnifeEdgeDiffractionModel.calculate_loss(
|
||||
d1_m=500, d2_m=500, h_m=10, wavelength_m=0.167,
|
||||
)
|
||||
assert loss_high > loss_low
|
||||
|
||||
def test_clearance_loss_positive_clearance(self):
|
||||
loss = KnifeEdgeDiffractionModel.calculate_clearance_loss(5.0, 1800)
|
||||
assert loss == 0.0
|
||||
|
||||
def test_clearance_loss_negative_clearance(self):
|
||||
loss = KnifeEdgeDiffractionModel.calculate_clearance_loss(-10.0, 1800)
|
||||
assert loss > 0
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
# Quick run without pytest
|
||||
for cls_name, cls in [
|
||||
("FreeSpace", TestFreeSpaceModel),
|
||||
("OkumuraHata", TestOkumuraHata),
|
||||
("COST231Hata", TestCost231Hata),
|
||||
("COST231WI", TestCost231WI),
|
||||
("ITU-R-P.1546", TestITUR_P1546),
|
||||
("LongleyRice", TestLongleyRice),
|
||||
("KnifeEdge", TestKnifeEdgeDiffraction),
|
||||
]:
|
||||
instance = cls()
|
||||
methods = [m for m in dir(instance) if m.startswith("test_")]
|
||||
for method_name in methods:
|
||||
try:
|
||||
getattr(instance, method_name)()
|
||||
print(f" PASS {cls_name}.{method_name}")
|
||||
except AssertionError as e:
|
||||
print(f" FAIL {cls_name}.{method_name}: {e}")
|
||||
except Exception as e:
|
||||
print(f" ERROR {cls_name}.{method_name}: {e}")
|
||||
|
||||
print("\nAll tests completed.")
|
||||
1
backend/tests/test_services/__init__.py
Normal file
1
backend/tests/test_services/__init__.py
Normal file
@@ -0,0 +1 @@
|
||||
|
||||
126
backend/tests/test_services/test_cache.py
Normal file
126
backend/tests/test_services/test_cache.py
Normal file
@@ -0,0 +1,126 @@
|
||||
"""
|
||||
Unit tests for the unified cache service.
|
||||
"""
|
||||
|
||||
import sys
|
||||
import os
|
||||
import time
|
||||
|
||||
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..'))
|
||||
|
||||
from app.services.cache import MemoryCache, CacheManager
|
||||
|
||||
|
||||
class TestMemoryCache:
|
||||
def test_get_miss(self):
|
||||
cache = MemoryCache("test", max_entries=10)
|
||||
assert cache.get("nonexistent") is None
|
||||
|
||||
def test_put_get(self):
|
||||
cache = MemoryCache("test", max_entries=10)
|
||||
cache.put("key1", "value1", size_bytes=100)
|
||||
assert cache.get("key1") == "value1"
|
||||
|
||||
def test_overwrite(self):
|
||||
cache = MemoryCache("test", max_entries=10)
|
||||
cache.put("key1", "v1", size_bytes=100)
|
||||
cache.put("key1", "v2", size_bytes=200)
|
||||
assert cache.get("key1") == "v2"
|
||||
assert cache.size == 1
|
||||
assert cache.size_bytes == 200
|
||||
|
||||
def test_eviction_by_entries(self):
|
||||
cache = MemoryCache("test", max_entries=3)
|
||||
cache.put("a", 1)
|
||||
cache.put("b", 2)
|
||||
cache.put("c", 3)
|
||||
assert cache.size == 3
|
||||
cache.put("d", 4) # Should evict 'a' (LRU)
|
||||
assert cache.size == 3
|
||||
assert cache.get("a") is None
|
||||
assert cache.get("d") == 4
|
||||
|
||||
def test_eviction_by_size(self):
|
||||
cache = MemoryCache("test", max_entries=100, max_size_bytes=300)
|
||||
cache.put("a", 1, size_bytes=100)
|
||||
cache.put("b", 2, size_bytes=100)
|
||||
cache.put("c", 3, size_bytes=100)
|
||||
assert cache.size_bytes == 300
|
||||
cache.put("d", 4, size_bytes=100) # Should evict 'a'
|
||||
assert cache.size_bytes == 300
|
||||
assert cache.get("a") is None
|
||||
|
||||
def test_lru_access_order(self):
|
||||
cache = MemoryCache("test", max_entries=3)
|
||||
cache.put("a", 1)
|
||||
cache.put("b", 2)
|
||||
cache.put("c", 3)
|
||||
# Access 'a' to make it recently used
|
||||
cache.get("a")
|
||||
# Add 'd' — should evict 'b' (now LRU)
|
||||
cache.put("d", 4)
|
||||
assert cache.get("a") == 1 # Still there
|
||||
assert cache.get("b") is None # Evicted
|
||||
|
||||
def test_remove(self):
|
||||
cache = MemoryCache("test", max_entries=10)
|
||||
cache.put("key1", "val", size_bytes=50)
|
||||
assert cache.remove("key1") is True
|
||||
assert cache.get("key1") is None
|
||||
assert cache.size_bytes == 0
|
||||
|
||||
def test_clear(self):
|
||||
cache = MemoryCache("test", max_entries=10)
|
||||
cache.put("a", 1, size_bytes=100)
|
||||
cache.put("b", 2, size_bytes=100)
|
||||
cache.clear()
|
||||
assert cache.size == 0
|
||||
assert cache.size_bytes == 0
|
||||
|
||||
def test_stats(self):
|
||||
cache = MemoryCache("test", max_entries=10, max_size_bytes=1024)
|
||||
cache.put("a", 1, size_bytes=100)
|
||||
cache.get("a") # hit
|
||||
cache.get("b") # miss
|
||||
s = cache.stats()
|
||||
assert s["name"] == "test"
|
||||
assert s["entries"] == 1
|
||||
assert s["hits"] == 1
|
||||
assert s["misses"] == 1
|
||||
assert s["hit_rate"] == 50.0
|
||||
|
||||
|
||||
class TestCacheManager:
|
||||
def test_singleton_structure(self):
|
||||
mgr = CacheManager()
|
||||
assert mgr.terrain is not None
|
||||
assert mgr.buildings is not None
|
||||
assert mgr.spatial is not None
|
||||
assert mgr.osm_disk is not None
|
||||
|
||||
def test_stats(self):
|
||||
mgr = CacheManager()
|
||||
s = mgr.stats()
|
||||
assert "terrain" in s
|
||||
assert "buildings" in s
|
||||
assert "total_memory_mb" in s
|
||||
|
||||
def test_clear_all(self):
|
||||
mgr = CacheManager()
|
||||
mgr.terrain.put("test", "data", 100)
|
||||
mgr.buildings.put("test", "data", 100)
|
||||
mgr.clear_all()
|
||||
assert mgr.terrain.size == 0
|
||||
assert mgr.buildings.size == 0
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
for cls_name, cls in [("MemoryCache", TestMemoryCache), ("CacheManager", TestCacheManager)]:
|
||||
instance = cls()
|
||||
for method_name in [m for m in dir(instance) if m.startswith("test_")]:
|
||||
try:
|
||||
getattr(instance, method_name)()
|
||||
print(f" PASS {cls_name}.{method_name}")
|
||||
except Exception as e:
|
||||
print(f" FAIL {cls_name}.{method_name}: {e}")
|
||||
print("\nAll tests completed.")
|
||||
174
desktop/main.js
174
desktop/main.js
@@ -52,9 +52,11 @@ const getLogPath = () => {
|
||||
const getBackendExePath = () => {
|
||||
const exeName = process.platform === 'win32' ? 'rfcp-server.exe' : 'rfcp-server';
|
||||
if (isDev) {
|
||||
return path.join(__dirname, '..', 'backend', exeName);
|
||||
// Dev: use the ONEDIR build output
|
||||
return path.join(__dirname, '..', 'backend', 'dist', 'rfcp-server', exeName);
|
||||
}
|
||||
return getResourcePath('backend', exeName);
|
||||
// Production: ONEDIR structure - backend/rfcp-server/rfcp-server.exe
|
||||
return getResourcePath('backend', 'rfcp-server', exeName);
|
||||
};
|
||||
|
||||
/** Frontend index.html path (production only) */
|
||||
@@ -269,17 +271,33 @@ function createMainWindow() {
|
||||
});
|
||||
|
||||
// Save window state on close and trigger shutdown
|
||||
mainWindow.on('close', () => {
|
||||
mainWindow.on('close', (event) => {
|
||||
log('[CLOSE] Window close event fired, isQuitting=' + isQuitting);
|
||||
try {
|
||||
const bounds = mainWindow.getBounds();
|
||||
store.set('windowState', bounds);
|
||||
} catch (_e) {}
|
||||
isQuitting = true;
|
||||
// Graceful shutdown is async but we also do sync kill as safety net
|
||||
gracefulShutdown().catch(() => {});
|
||||
killBackend();
|
||||
killAllBackendProcesses();
|
||||
|
||||
if (!isQuitting) {
|
||||
event.preventDefault();
|
||||
isQuitting = true;
|
||||
|
||||
// Hard timeout: force exit after 5 seconds no matter what
|
||||
const forceExitTimer = setTimeout(() => {
|
||||
log('[CLOSE] Force exit after 5s timeout');
|
||||
killAllRfcpProcesses();
|
||||
process.exit(0);
|
||||
}, 5000);
|
||||
|
||||
gracefulShutdown().then(() => {
|
||||
clearTimeout(forceExitTimer);
|
||||
app.exit(0);
|
||||
}).catch(() => {
|
||||
clearTimeout(forceExitTimer);
|
||||
killAllRfcpProcesses();
|
||||
app.exit(0);
|
||||
});
|
||||
}
|
||||
});
|
||||
|
||||
// Load frontend
|
||||
@@ -364,43 +382,93 @@ function killBackend() {
|
||||
}
|
||||
|
||||
/**
|
||||
* Nuclear option: kill ALL rfcp-server processes by name.
|
||||
* This catches orphaned workers that PID-based kill misses.
|
||||
* Aggressive kill: multiple strategies to ensure ALL rfcp-server processes die.
|
||||
* Uses 4 strategies on Windows for maximum reliability.
|
||||
*/
|
||||
function killAllBackendProcesses() {
|
||||
log('[KILL] killAllBackendProcesses() — killing by process name...');
|
||||
function killAllRfcpProcesses() {
|
||||
log('[KILL] === Starting aggressive kill ===');
|
||||
|
||||
if (process.platform === 'win32') {
|
||||
// Strategy 1: Kill by image name (most reliable)
|
||||
try {
|
||||
execSync('taskkill /F /IM rfcp-server.exe /T', {
|
||||
stdio: 'ignore',
|
||||
timeout: 5000
|
||||
log('[KILL] Strategy 1: taskkill /F /IM');
|
||||
execSync('taskkill /F /IM rfcp-server.exe', {
|
||||
stdio: 'pipe',
|
||||
timeout: 5000,
|
||||
windowsHide: true
|
||||
});
|
||||
log('[KILL] taskkill /IM rfcp-server.exe completed');
|
||||
log('[KILL] Strategy 1: SUCCESS');
|
||||
} catch (_e) {
|
||||
// Error means no processes found — OK
|
||||
log('[KILL] No rfcp-server.exe processes found (or already killed)');
|
||||
log('[KILL] Strategy 1: No processes or already killed');
|
||||
}
|
||||
|
||||
// Strategy 2: Kill by PID tree if we have PID
|
||||
if (backendPid) {
|
||||
try {
|
||||
log(`[KILL] Strategy 2: taskkill /F /T /PID ${backendPid}`);
|
||||
execSync(`taskkill /F /T /PID ${backendPid}`, {
|
||||
stdio: 'pipe',
|
||||
timeout: 5000,
|
||||
windowsHide: true
|
||||
});
|
||||
log('[KILL] Strategy 2: SUCCESS');
|
||||
} catch (_e) {
|
||||
log('[KILL] Strategy 2: PID not found');
|
||||
}
|
||||
}
|
||||
|
||||
// Strategy 3: PowerShell kill (backup)
|
||||
try {
|
||||
log('[KILL] Strategy 3: PowerShell Stop-Process');
|
||||
execSync('powershell -Command "Get-Process rfcp-server -ErrorAction SilentlyContinue | Stop-Process -Force"', {
|
||||
stdio: 'pipe',
|
||||
timeout: 5000,
|
||||
windowsHide: true
|
||||
});
|
||||
log('[KILL] Strategy 3: SUCCESS');
|
||||
} catch (_e) {
|
||||
log('[KILL] Strategy 3: PowerShell failed or no processes');
|
||||
}
|
||||
|
||||
// Strategy 4: PowerShell CimInstance terminate (modern replacement for wmic)
|
||||
try {
|
||||
log('[KILL] Strategy 4: PowerShell CimInstance Terminate');
|
||||
execSync('powershell -NoProfile -Command "Get-CimInstance Win32_Process -Filter \\"name=\'rfcp-server.exe\'\\" | Invoke-CimMethod -MethodName Terminate"', {
|
||||
stdio: 'pipe',
|
||||
timeout: 5000,
|
||||
windowsHide: true
|
||||
});
|
||||
log('[KILL] Strategy 4: SUCCESS');
|
||||
} catch (_e) {
|
||||
log('[KILL] Strategy 4: No processes or failed');
|
||||
}
|
||||
} else {
|
||||
// Unix: pkill
|
||||
try {
|
||||
execSync('pkill -9 -f rfcp-server', {
|
||||
stdio: 'ignore',
|
||||
timeout: 5000
|
||||
});
|
||||
execSync('pkill -9 -f rfcp-server', { stdio: 'pipe', timeout: 5000 });
|
||||
log('[KILL] pkill rfcp-server completed');
|
||||
} catch (_e) {
|
||||
log('[KILL] No rfcp-server processes found');
|
||||
}
|
||||
}
|
||||
|
||||
backendPid = null;
|
||||
backendProcess = null;
|
||||
log('[KILL] === Kill sequence complete ===');
|
||||
}
|
||||
|
||||
/**
|
||||
* Graceful shutdown: ask backend to clean up, then force kill everything.
|
||||
* Graceful shutdown: API call first, then PID-tree kill, then name-based kill.
|
||||
*
|
||||
* The backend's /shutdown endpoint kills workers by name and schedules
|
||||
* os._exit(3s) as a safety net. We then do PID-tree kill (most reliable
|
||||
* on Windows — catches all child processes) while the main PID is still
|
||||
* alive, followed by name-based kill as final sweep.
|
||||
*/
|
||||
async function gracefulShutdown() {
|
||||
log('[SHUTDOWN] Requesting graceful shutdown...');
|
||||
log('[SHUTDOWN] Starting graceful shutdown...');
|
||||
|
||||
// Step 1: Ask backend to clean up workers and exit
|
||||
// Step 1: Ask backend to clean up workers (pool shutdown + name kill)
|
||||
try {
|
||||
const controller = new AbortController();
|
||||
const timeout = setTimeout(() => controller.abort(), 2000);
|
||||
@@ -410,18 +478,22 @@ async function gracefulShutdown() {
|
||||
});
|
||||
clearTimeout(timeout);
|
||||
log('[SHUTDOWN] Backend acknowledged shutdown');
|
||||
// Brief wait for pool.shutdown() to take effect
|
||||
await new Promise(r => setTimeout(r, 500));
|
||||
} catch (_e) {
|
||||
log('[SHUTDOWN] Backend did not respond — force killing');
|
||||
}
|
||||
|
||||
// Step 2: Wait briefly for graceful exit
|
||||
await new Promise(r => setTimeout(r, 500));
|
||||
|
||||
// Step 3: PID-based kill (catches the main process)
|
||||
// Step 2: PID-tree kill — most reliable, catches all child processes
|
||||
// Must run while main backend PID is still alive (before os._exit safety net)
|
||||
killBackend();
|
||||
|
||||
// Step 4: Name-based kill (catches orphaned workers)
|
||||
killAllBackendProcesses();
|
||||
// Step 3: Name-based kill — catches any orphans not in the process tree
|
||||
killAllRfcpProcesses();
|
||||
|
||||
// Step 4: Wait and verify
|
||||
await new Promise(r => setTimeout(r, 500));
|
||||
log('[SHUTDOWN] Shutdown complete');
|
||||
}
|
||||
|
||||
// ── App lifecycle ──────────────────────────────────────────────────
|
||||
@@ -456,8 +528,7 @@ app.whenReady().then(async () => {
|
||||
app.on('window-all-closed', () => {
|
||||
log('[CLOSE] window-all-closed fired');
|
||||
isQuitting = true;
|
||||
killBackend();
|
||||
killAllBackendProcesses();
|
||||
killAllRfcpProcesses();
|
||||
|
||||
if (process.platform !== 'darwin') {
|
||||
app.quit();
|
||||
@@ -470,17 +541,32 @@ app.on('activate', () => {
|
||||
}
|
||||
});
|
||||
|
||||
app.on('before-quit', () => {
|
||||
log('[CLOSE] before-quit fired');
|
||||
isQuitting = true;
|
||||
killBackend();
|
||||
killAllBackendProcesses();
|
||||
app.on('before-quit', (event) => {
|
||||
log('[CLOSE] before-quit fired, isQuitting=' + isQuitting);
|
||||
if (!isQuitting) {
|
||||
event.preventDefault();
|
||||
isQuitting = true;
|
||||
|
||||
const forceExitTimer = setTimeout(() => {
|
||||
log('[CLOSE] Force exit from before-quit after 5s');
|
||||
killAllRfcpProcesses();
|
||||
process.exit(0);
|
||||
}, 5000);
|
||||
|
||||
gracefulShutdown().then(() => {
|
||||
clearTimeout(forceExitTimer);
|
||||
app.exit(0);
|
||||
}).catch(() => {
|
||||
clearTimeout(forceExitTimer);
|
||||
killAllRfcpProcesses();
|
||||
app.exit(0);
|
||||
});
|
||||
}
|
||||
});
|
||||
|
||||
app.on('will-quit', () => {
|
||||
log('[CLOSE] will-quit fired');
|
||||
killBackend();
|
||||
killAllBackendProcesses();
|
||||
killAllRfcpProcesses();
|
||||
|
||||
if (backendLogStream) {
|
||||
try { backendLogStream.end(); } catch (_e) {}
|
||||
@@ -508,21 +594,19 @@ process.on('exit', () => {
|
||||
}
|
||||
|
||||
// Name-based kill — catches orphaned workers
|
||||
killAllBackendProcesses();
|
||||
killAllRfcpProcesses();
|
||||
});
|
||||
|
||||
// Handle SIGINT/SIGTERM (Ctrl+C, system shutdown)
|
||||
process.on('SIGINT', () => {
|
||||
try { log('[SIGNAL] SIGINT received'); } catch (_e) {}
|
||||
killBackend();
|
||||
killAllBackendProcesses();
|
||||
killAllRfcpProcesses();
|
||||
process.exit(0);
|
||||
});
|
||||
|
||||
process.on('SIGTERM', () => {
|
||||
try { log('[SIGNAL] SIGTERM received'); } catch (_e) {}
|
||||
killBackend();
|
||||
killAllBackendProcesses();
|
||||
killAllRfcpProcesses();
|
||||
process.exit(0);
|
||||
});
|
||||
|
||||
|
||||
233
docs/RFCP-Native-Backend-Research.md
Normal file
233
docs/RFCP-Native-Backend-Research.md
Normal file
@@ -0,0 +1,233 @@
|
||||
# RFCP Native Backend Research
|
||||
|
||||
## Executive Summary
|
||||
|
||||
**Finding:** The production Electron app already supports native Windows operation without WSL2.
|
||||
|
||||
The production build uses PyInstaller to bundle the Python backend as a standalone Windows executable (`rfcp-server.exe`). WSL2 is only used during development. No migration is needed for end users.
|
||||
|
||||
---
|
||||
|
||||
## Current Architecture
|
||||
|
||||
### Development Mode
|
||||
```
|
||||
RFCP (Electron dev)
|
||||
└── Spawns: python -m uvicorn app.main:app --host 127.0.0.1 --port 8090
|
||||
└── Uses system Python (Windows or WSL2)
|
||||
└── Requires venv with dependencies
|
||||
```
|
||||
|
||||
### Production Mode (Already Implemented)
|
||||
```
|
||||
RFCP.exe (Electron packaged)
|
||||
└── Spawns: rfcp-server.exe (bundled PyInstaller binary)
|
||||
└── Self-contained Python + all dependencies
|
||||
└── No WSL2 required
|
||||
└── No system Python required
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Evidence from Codebase
|
||||
|
||||
### desktop/main.js (Lines 120-145)
|
||||
|
||||
```javascript
|
||||
function startBackend() {
|
||||
// Production: use bundled executable
|
||||
if (isProduction) {
|
||||
const serverPath = path.join(process.resourcesPath, 'rfcp-server.exe');
|
||||
if (fs.existsSync(serverPath)) {
|
||||
backendProcess = spawn(serverPath, [], { ... });
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
// Development: use system Python
|
||||
backendProcess = spawn('python', ['-m', 'uvicorn', 'app.main:app', ...]);
|
||||
}
|
||||
```
|
||||
|
||||
### installer/rfcp-server.spec (PyInstaller Config)
|
||||
|
||||
```python
|
||||
# Key configuration
|
||||
a = Analysis(
|
||||
['run_server.py'],
|
||||
pathex=[backend_path],
|
||||
binaries=[],
|
||||
datas=[
|
||||
('data/terrain', 'data/terrain'), # Terrain data bundled
|
||||
],
|
||||
hiddenimports=[
|
||||
'uvicorn.logging', 'uvicorn.loops', 'uvicorn.protocols',
|
||||
'motor', 'pymongo', 'numpy', 'scipy', 'shapely',
|
||||
# Full list of dependencies
|
||||
],
|
||||
)
|
||||
|
||||
exe = EXE(
|
||||
pyz,
|
||||
a.scripts,
|
||||
name='rfcp-server',
|
||||
console=True, # Shows console for debugging
|
||||
icon='rfcp.ico',
|
||||
)
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## GPU Acceleration in Production
|
||||
|
||||
### Current Status
|
||||
The PyInstaller bundle **does not include CuPy** by default because:
|
||||
1. CuPy requires CUDA runtime (large, ~500MB)
|
||||
2. Not all users have NVIDIA GPUs
|
||||
3. Binary would be too large for distribution
|
||||
|
||||
### Solution Options
|
||||
|
||||
**Option A: Ship CPU-only (Current)**
|
||||
- Production build uses NumPy (CPU) for calculations
|
||||
- GPU acceleration available only in dev mode or manual install
|
||||
- Smallest download size (~100MB)
|
||||
|
||||
**Option B: Separate GPU Installer**
|
||||
- Main installer: CPU-only (~100MB)
|
||||
- Optional GPU addon: Downloads CuPy + CUDA runtime (~600MB)
|
||||
- Implemented via install_rfcp.py dependency installer
|
||||
|
||||
**Option C: CUDA Toolkit Detection**
|
||||
- Detect if CUDA is already installed on user's system
|
||||
- If yes, attempt to load CuPy dynamically
|
||||
- Graceful fallback to NumPy if not available
|
||||
|
||||
### Recommendation
|
||||
Keep Option A (CPU-only production) with Option B available for power users:
|
||||
1. Default production build works everywhere
|
||||
2. Users with NVIDIA GPUs can run `install_rfcp.py` to enable GPU acceleration
|
||||
3. No WSL2 required for either path
|
||||
|
||||
---
|
||||
|
||||
## Terrain Data Handling
|
||||
|
||||
### Current Implementation
|
||||
Terrain data (SRTM .hgt files) is bundled inside the PyInstaller executable:
|
||||
|
||||
```python
|
||||
datas=[
|
||||
('data/terrain', 'data/terrain'),
|
||||
]
|
||||
```
|
||||
|
||||
### Considerations
|
||||
- Bundled terrain data increases exe size significantly
|
||||
- Alternative: Download terrain on first use (like current region download system)
|
||||
- For initial release, bundling common regions is acceptable
|
||||
|
||||
---
|
||||
|
||||
## Database (MongoDB)
|
||||
|
||||
### Production Architecture
|
||||
The Electron app embeds MongoDB or requires MongoDB to be installed separately.
|
||||
|
||||
Options:
|
||||
1. **Embedded MongoDB** - Ships mongod.exe with the app
|
||||
2. **MongoDB Atlas** - Cloud database (requires internet)
|
||||
3. **SQLite** - Switch to file-based database (significant refactor)
|
||||
4. **In-memory + file persistence** - No MongoDB required (significant refactor)
|
||||
|
||||
Current implementation uses Motor (async MongoDB driver). For true standalone operation, consider SQLite migration in future iteration.
|
||||
|
||||
---
|
||||
|
||||
## Build Process
|
||||
|
||||
### Current Build Commands
|
||||
|
||||
```bash
|
||||
# Build backend executable
|
||||
cd /mnt/d/root/rfcp/backend
|
||||
pyinstaller ../installer/rfcp-server.spec
|
||||
|
||||
# Build Electron app with bundled backend
|
||||
cd /mnt/d/root/rfcp/installer
|
||||
./build-win.sh
|
||||
```
|
||||
|
||||
### Output
|
||||
- `rfcp-server.exe` - Standalone backend (~80MB)
|
||||
- `RFCP-Setup-{version}.exe` - Full installer with Electron + backend (~150MB)
|
||||
|
||||
---
|
||||
|
||||
## Testing Native Build
|
||||
|
||||
### Test Procedure
|
||||
1. Build `rfcp-server.exe` via PyInstaller
|
||||
2. Run directly: `./rfcp-server.exe`
|
||||
3. Verify API responds: `curl http://localhost:8090/api/health`
|
||||
4. Verify coverage calculation works
|
||||
5. Check GPU detection in logs
|
||||
|
||||
### Known Issues
|
||||
1. **First launch slow**: PyInstaller extracts on first run (~5-10 seconds)
|
||||
2. **Antivirus false positives**: Some AV flags PyInstaller executables
|
||||
3. **Console window**: Shows black console (use `console=False` for windowless)
|
||||
|
||||
---
|
||||
|
||||
## Conclusions
|
||||
|
||||
### No Migration Needed
|
||||
The production Electron app already works without WSL2. The current architecture is:
|
||||
- ✅ Native Windows executable
|
||||
- ✅ No Python installation required
|
||||
- ✅ No WSL2 required
|
||||
- ✅ Self-contained dependencies
|
||||
|
||||
### Development vs Production
|
||||
| Aspect | Development | Production |
|
||||
|--------|-------------|------------|
|
||||
| Python | System Python / venv | Bundled via PyInstaller |
|
||||
| WSL2 | Optional (for testing) | Not required |
|
||||
| GPU | CuPy if installed | CPU-only (NumPy) |
|
||||
| MongoDB | Local instance | Embedded or Atlas |
|
||||
| Terrain | Local data/ folder | Bundled in exe |
|
||||
|
||||
### Remaining Work
|
||||
1. **GPU for production**: Implement Optional GPU addon installer
|
||||
2. **Smaller package**: On-demand terrain download instead of bundling
|
||||
3. **Database portability**: Consider SQLite migration for offline-first
|
||||
4. **Installer polish**: Signed executables, auto-update support
|
||||
|
||||
---
|
||||
|
||||
## Appendix: Full PyInstaller Hidden Imports
|
||||
|
||||
From `installer/rfcp-server.spec`:
|
||||
```python
|
||||
hiddenimports=[
|
||||
'uvicorn.logging',
|
||||
'uvicorn.loops',
|
||||
'uvicorn.loops.auto',
|
||||
'uvicorn.protocols',
|
||||
'uvicorn.protocols.http',
|
||||
'uvicorn.protocols.http.auto',
|
||||
'uvicorn.protocols.websockets',
|
||||
'uvicorn.protocols.websockets.auto',
|
||||
'uvicorn.lifespan',
|
||||
'uvicorn.lifespan.on',
|
||||
'motor',
|
||||
'pymongo',
|
||||
'numpy',
|
||||
'scipy',
|
||||
'shapely',
|
||||
'shapely.geometry',
|
||||
'shapely.ops',
|
||||
# ... additional imports
|
||||
]
|
||||
```
|
||||
@@ -0,0 +1,463 @@
|
||||
# RFCP — Iteration 3.10: Link Budget, Fresnel Zone & Interference Modeling
|
||||
|
||||
## Overview
|
||||
Add three interconnected RF analysis features: link budget calculator panel, Fresnel zone visualization on terrain profiles, and basic interference (C/I) modeling for multi-site scenarios. These build on existing infrastructure — propagation models, terrain profiles, and multi-site coverage.
|
||||
|
||||
## Priority Order
|
||||
1. Link Budget Calculator (simplest, standalone UI)
|
||||
2. Fresnel Zone Visualization (extends terrain profile)
|
||||
3. Interference Modeling (extends coverage engine)
|
||||
|
||||
---
|
||||
|
||||
## Feature 1: Link Budget Calculator
|
||||
|
||||
### Description
|
||||
A panel/dialog that shows the complete RF link budget as a table — from transmitter to receiver. Uses existing propagation model values but presents them in the standard telecom link budget format.
|
||||
|
||||
### Implementation
|
||||
|
||||
**New component:** `frontend/src/components/panels/LinkBudgetPanel.tsx`
|
||||
|
||||
The panel should display a table with rows for each element in the link chain. It should use the currently selected site's parameters and a configurable receiver point (either clicked on map or manually entered coordinates).
|
||||
|
||||
**Link Budget Table Structure:**
|
||||
```
|
||||
TRANSMITTER
|
||||
Tx Power (dBm) [from site config, e.g. 43 dBm]
|
||||
Tx Antenna Gain (dBi) [from site config, e.g. 18 dBi]
|
||||
Tx Cable/Connector Loss (dB) [new field, default 2 dB]
|
||||
EIRP (dBm) = Tx Power + Gain - Cable Loss
|
||||
|
||||
PATH
|
||||
Distance (km) [calculated from Tx to Rx point]
|
||||
Free Space Path Loss (dB) [existing formula: 20log(d) + 20log(f) + 32.45]
|
||||
Terrain Diffraction Loss (dB) [from terrain_los model if available]
|
||||
Vegetation Loss (dB) [from vegetation model if available]
|
||||
Atmospheric Loss (dB) [from atmospheric model if available]
|
||||
Total Path Loss (dB) = sum of all path losses
|
||||
|
||||
RECEIVER
|
||||
Rx Antenna Gain (dBi) [configurable, default 0 dBi for handset]
|
||||
Rx Cable Loss (dB) [configurable, default 0 dB]
|
||||
Rx Sensitivity (dBm) [configurable, default -100 dBm]
|
||||
|
||||
RESULT
|
||||
Received Power (dBm) = EIRP - Total Path Loss + Rx Gain - Rx Cable
|
||||
Link Margin (dB) = Received Power - Rx Sensitivity
|
||||
Status = "OK" if margin > 0, "FAIL" if < 0
|
||||
```
|
||||
|
||||
**Backend addition:** Add a new endpoint or extend existing coverage API.
|
||||
|
||||
**File:** `backend/app/api/routes/coverage.py` (or new `link_budget.py`)
|
||||
|
||||
```python
|
||||
@router.post("/api/link-budget")
|
||||
async def calculate_link_budget(request: dict):
|
||||
"""Calculate point-to-point link budget.
|
||||
|
||||
Body: {
|
||||
"site_id": "...", # or tx_lat/tx_lon/tx_params
|
||||
"tx_lat": 48.46,
|
||||
"tx_lon": 35.04,
|
||||
"tx_power_dbm": 43,
|
||||
"tx_gain_dbi": 18,
|
||||
"tx_cable_loss_db": 2,
|
||||
"tx_height_m": 30,
|
||||
"rx_lat": 48.50,
|
||||
"rx_lon": 35.10,
|
||||
"rx_gain_dbi": 0,
|
||||
"rx_cable_loss_db": 0,
|
||||
"rx_sensitivity_dbm": -100,
|
||||
"rx_height_m": 1.5,
|
||||
"frequency_mhz": 1800
|
||||
}
|
||||
"""
|
||||
from app.services.terrain_service import terrain_service
|
||||
|
||||
# Calculate distance
|
||||
distance_m = terrain_service.haversine_distance(
|
||||
request["tx_lat"], request["tx_lon"],
|
||||
request["rx_lat"], request["rx_lon"]
|
||||
)
|
||||
distance_km = distance_m / 1000
|
||||
|
||||
# Get elevations
|
||||
tx_elev = await terrain_service.get_elevation(request["tx_lat"], request["tx_lon"])
|
||||
rx_elev = await terrain_service.get_elevation(request["rx_lat"], request["rx_lon"])
|
||||
|
||||
# EIRP
|
||||
eirp_dbm = request["tx_power_dbm"] + request["tx_gain_dbi"] - request["tx_cable_loss_db"]
|
||||
|
||||
# Free space path loss
|
||||
freq = request["frequency_mhz"]
|
||||
fspl_db = 20 * math.log10(distance_km) + 20 * math.log10(freq) + 32.45 if distance_km > 0 else 0
|
||||
|
||||
# Terrain profile for LOS check
|
||||
profile = await terrain_service.get_elevation_profile(
|
||||
request["tx_lat"], request["tx_lon"],
|
||||
request["rx_lat"], request["rx_lon"],
|
||||
num_points=100
|
||||
)
|
||||
|
||||
# Simple LOS check - does terrain block line of sight?
|
||||
tx_total_height = tx_elev + request.get("tx_height_m", 30)
|
||||
rx_total_height = rx_elev + request.get("rx_height_m", 1.5)
|
||||
|
||||
terrain_loss_db = 0
|
||||
los_clear = True
|
||||
for i, point in enumerate(profile):
|
||||
if i == 0 or i == len(profile) - 1:
|
||||
continue
|
||||
# Linear interpolation of LOS line at this point
|
||||
fraction = i / (len(profile) - 1)
|
||||
los_height = tx_total_height + fraction * (rx_total_height - tx_total_height)
|
||||
if point["elevation"] > los_height:
|
||||
los_clear = False
|
||||
# Simple knife-edge diffraction estimate
|
||||
terrain_loss_db += 6 # ~6dB per obstruction (simplified)
|
||||
|
||||
total_path_loss = fspl_db + terrain_loss_db
|
||||
|
||||
# Received power
|
||||
rx_power_dbm = eirp_dbm - total_path_loss + request["rx_gain_dbi"] - request["rx_cable_loss_db"]
|
||||
|
||||
# Link margin
|
||||
margin_db = rx_power_dbm - request["rx_sensitivity_dbm"]
|
||||
|
||||
return {
|
||||
"distance_km": round(distance_km, 2),
|
||||
"distance_m": round(distance_m, 1),
|
||||
"tx_elevation_m": round(tx_elev, 1),
|
||||
"rx_elevation_m": round(rx_elev, 1),
|
||||
"eirp_dbm": round(eirp_dbm, 1),
|
||||
"fspl_db": round(fspl_db, 1),
|
||||
"terrain_loss_db": round(terrain_loss_db, 1),
|
||||
"total_path_loss_db": round(total_path_loss, 1),
|
||||
"los_clear": los_clear,
|
||||
"rx_power_dbm": round(rx_power_dbm, 1),
|
||||
"margin_db": round(margin_db, 1),
|
||||
"status": "OK" if margin_db >= 0 else "FAIL",
|
||||
"profile": profile,
|
||||
}
|
||||
```
|
||||
|
||||
### UI Requirements
|
||||
- New panel accessible from sidebar or toolbar button (calculator icon)
|
||||
- Click on map to set Rx point (with crosshair cursor)
|
||||
- Auto-populates Tx params from selected site
|
||||
- Shows result table with color coding (green margin = OK, red = FAIL)
|
||||
- Optionally draws line on map from Tx to Rx
|
||||
|
||||
---
|
||||
|
||||
## Feature 2: Fresnel Zone Visualization
|
||||
|
||||
### Description
|
||||
Draw Fresnel zone ellipse overlay on the Terrain Profile chart, showing where terrain intrudes into the first Fresnel zone. This is critical for understanding if a radio link will actually work — even if terrain doesn't block direct LOS, Fresnel zone obstruction causes significant signal loss.
|
||||
|
||||
### Implementation
|
||||
|
||||
**Modify:** The existing Terrain Profile component/chart
|
||||
|
||||
**Fresnel Zone Radius Formula:**
|
||||
```python
|
||||
import math
|
||||
|
||||
def fresnel_radius(n: int, frequency_mhz: float, d1_m: float, d2_m: float) -> float:
|
||||
"""Calculate nth Fresnel zone radius at a point along the path.
|
||||
|
||||
Args:
|
||||
n: Fresnel zone number (1 = first zone, most important)
|
||||
frequency_mhz: Frequency in MHz
|
||||
d1_m: Distance from transmitter to this point (meters)
|
||||
d2_m: Distance from this point to receiver (meters)
|
||||
|
||||
Returns:
|
||||
Radius of nth Fresnel zone in meters
|
||||
"""
|
||||
wavelength = 300.0 / frequency_mhz # meters
|
||||
d_total = d1_m + d2_m
|
||||
if d_total == 0:
|
||||
return 0
|
||||
radius = math.sqrt((n * wavelength * d1_m * d2_m) / d_total)
|
||||
return radius
|
||||
```
|
||||
|
||||
**Backend endpoint:** `backend/app/api/routes/coverage.py`
|
||||
|
||||
```python
|
||||
@router.post("/api/fresnel-profile")
|
||||
async def fresnel_profile(request: dict):
|
||||
"""Calculate terrain profile with Fresnel zone boundaries.
|
||||
|
||||
Body: {
|
||||
"tx_lat": 48.46, "tx_lon": 35.04, "tx_height_m": 30,
|
||||
"rx_lat": 48.50, "rx_lon": 35.10, "rx_height_m": 1.5,
|
||||
"frequency_mhz": 1800,
|
||||
"num_points": 100
|
||||
}
|
||||
"""
|
||||
from app.services.terrain_service import terrain_service
|
||||
|
||||
tx_lat, tx_lon = request["tx_lat"], request["tx_lon"]
|
||||
rx_lat, rx_lon = request["rx_lat"], request["rx_lon"]
|
||||
tx_height = request.get("tx_height_m", 30)
|
||||
rx_height = request.get("rx_height_m", 1.5)
|
||||
freq = request.get("frequency_mhz", 1800)
|
||||
num_points = request.get("num_points", 100)
|
||||
|
||||
# Get terrain profile
|
||||
profile = await terrain_service.get_elevation_profile(
|
||||
tx_lat, tx_lon, rx_lat, rx_lon, num_points
|
||||
)
|
||||
|
||||
total_distance = profile[-1]["distance"] if profile else 0
|
||||
|
||||
# Get endpoint elevations
|
||||
tx_elev = profile[0]["elevation"] if profile else 0
|
||||
rx_elev = profile[-1]["elevation"] if profile else 0
|
||||
tx_total = tx_elev + tx_height
|
||||
rx_total = rx_elev + rx_height
|
||||
|
||||
wavelength = 300.0 / freq # meters
|
||||
|
||||
# Calculate Fresnel zone at each profile point
|
||||
fresnel_data = []
|
||||
los_blocked = False
|
||||
fresnel_blocked = False
|
||||
worst_clearance = float('inf')
|
||||
|
||||
for i, point in enumerate(profile):
|
||||
d1 = point["distance"] # distance from tx
|
||||
d2 = total_distance - d1 # distance to rx
|
||||
|
||||
# LOS height at this point (linear interpolation)
|
||||
if total_distance > 0:
|
||||
fraction = d1 / total_distance
|
||||
else:
|
||||
fraction = 0
|
||||
los_height = tx_total + fraction * (rx_total - tx_total)
|
||||
|
||||
# First Fresnel zone radius
|
||||
if d1 > 0 and d2 > 0 and total_distance > 0:
|
||||
f1_radius = math.sqrt((1 * wavelength * d1 * d2) / total_distance)
|
||||
else:
|
||||
f1_radius = 0
|
||||
|
||||
# Fresnel zone boundaries (height above sea level)
|
||||
fresnel_top = los_height + f1_radius
|
||||
fresnel_bottom = los_height - f1_radius
|
||||
|
||||
# Clearance: how much space between terrain and Fresnel bottom
|
||||
clearance = fresnel_bottom - point["elevation"]
|
||||
|
||||
if clearance < worst_clearance:
|
||||
worst_clearance = clearance
|
||||
|
||||
if point["elevation"] > los_height:
|
||||
los_blocked = True
|
||||
if point["elevation"] > fresnel_bottom:
|
||||
fresnel_blocked = True
|
||||
|
||||
fresnel_data.append({
|
||||
"distance": point["distance"],
|
||||
"lat": point["lat"],
|
||||
"lon": point["lon"],
|
||||
"terrain_elevation": point["elevation"],
|
||||
"los_height": round(los_height, 1),
|
||||
"fresnel_top": round(fresnel_top, 1),
|
||||
"fresnel_bottom": round(fresnel_bottom, 1),
|
||||
"f1_radius": round(f1_radius, 1),
|
||||
"clearance": round(clearance, 1),
|
||||
})
|
||||
|
||||
return {
|
||||
"profile": fresnel_data,
|
||||
"total_distance_m": round(total_distance, 1),
|
||||
"tx_elevation": round(tx_elev, 1),
|
||||
"rx_elevation": round(rx_elev, 1),
|
||||
"frequency_mhz": freq,
|
||||
"wavelength_m": round(wavelength, 4),
|
||||
"los_clear": not los_blocked,
|
||||
"fresnel_clear": not fresnel_blocked,
|
||||
"worst_clearance_m": round(worst_clearance, 1),
|
||||
"recommendation": (
|
||||
"Clear — excellent link" if not fresnel_blocked
|
||||
else "Fresnel zone partially blocked — expect 3-6 dB additional loss"
|
||||
if not los_blocked
|
||||
else "LOS blocked — significant diffraction loss expected"
|
||||
),
|
||||
}
|
||||
```
|
||||
|
||||
### Frontend Visualization
|
||||
On the existing Terrain Profile chart:
|
||||
- Draw the LOS line (straight line from Tx to Rx) — this may already exist
|
||||
- Draw first Fresnel zone as a **semi-transparent elliptical area** around the LOS line
|
||||
- Upper boundary = `fresnel_top` series
|
||||
- Lower boundary = `fresnel_bottom` series
|
||||
- Color: light blue with ~20% opacity
|
||||
- Where terrain intersects Fresnel zone, highlight in red/orange
|
||||
- Show clearance info in the profile tooltip
|
||||
- Add a summary badge: "LOS Clear ✓" / "Fresnel 60% Clear ⚠" / "LOS Blocked ✗"
|
||||
|
||||
---
|
||||
|
||||
## Feature 3: Interference Modeling (C/I Ratio)
|
||||
|
||||
### Description
|
||||
Add carrier-to-interference ratio calculation to the coverage engine. For each grid point, calculate the C/I ratio: the signal from the serving cell vs the sum of signals from all other cells on the same frequency. Display as a separate heatmap layer.
|
||||
|
||||
### Implementation
|
||||
|
||||
**Backend changes:**
|
||||
|
||||
**File:** `backend/app/services/coverage_service.py` (or gpu_service.py)
|
||||
|
||||
Add C/I calculation after existing coverage computation:
|
||||
|
||||
```python
|
||||
def calculate_interference(self, sites: list, coverage_results: dict) -> np.ndarray:
|
||||
"""Calculate C/I ratio for each grid point.
|
||||
|
||||
For each point:
|
||||
- C = signal strength from strongest (serving) cell
|
||||
- I = sum of signal strengths from all other co-frequency cells
|
||||
- C/I = C - 10*log10(sum of linear interference powers)
|
||||
|
||||
Returns array of C/I values in dB.
|
||||
"""
|
||||
# Get all RSRP grids (already calculated)
|
||||
# For each point, find:
|
||||
# 1. Best server (strongest signal) = C
|
||||
# 2. Sum of all others on same frequency = I
|
||||
# 3. C/I = C(dBm) - I(dBm)
|
||||
|
||||
# Group sites by frequency
|
||||
freq_groups = {}
|
||||
for site in sites:
|
||||
freq = site.get("frequency_mhz", 1800)
|
||||
if freq not in freq_groups:
|
||||
freq_groups[freq] = []
|
||||
freq_groups[freq].append(site)
|
||||
|
||||
# Only calculate interference for frequency groups with 2+ sites
|
||||
# For single-site frequencies, C/I = infinity (no interference)
|
||||
|
||||
# The RSRP values are already in dBm, need to convert to linear for summing
|
||||
# P_linear = 10^(P_dBm / 10)
|
||||
# I_total_linear = sum(P_linear for all interferers)
|
||||
# I_total_dBm = 10 * log10(I_total_linear)
|
||||
# C/I = C_dBm - I_total_dBm
|
||||
pass
|
||||
```
|
||||
|
||||
**Key algorithm (for GPU pipeline in gpu_service.py):**
|
||||
```python
|
||||
# After computing RSRP for all sites at all grid points:
|
||||
# rsrp_grid shape: (num_sites, num_points) in dBm
|
||||
|
||||
# Convert to linear (mW)
|
||||
rsrp_linear = 10 ** (rsrp_grid / 10.0) # CuPy array
|
||||
|
||||
# For each point, best server
|
||||
best_server_idx = cp.argmax(rsrp_grid, axis=0)
|
||||
best_rsrp_linear = cp.take_along_axis(rsrp_linear, best_server_idx[cp.newaxis, :], axis=0)[0]
|
||||
|
||||
# Total power from all sites
|
||||
total_power = cp.sum(rsrp_linear, axis=0)
|
||||
|
||||
# Interference = total - serving
|
||||
interference_linear = total_power - best_rsrp_linear
|
||||
|
||||
# C/I ratio in dB
|
||||
# Avoid log10(0) with small epsilon
|
||||
epsilon = 1e-30
|
||||
ci_ratio_db = 10 * cp.log10(best_rsrp_linear / (interference_linear + epsilon))
|
||||
|
||||
# Clip to reasonable range
|
||||
ci_ratio_db = cp.clip(ci_ratio_db, -20, 50)
|
||||
```
|
||||
|
||||
### Frontend Visualization
|
||||
- Add a toggle in the coverage controls: "Show: Signal (RSRP) | Interference (C/I)"
|
||||
- C/I heatmap uses different color scale:
|
||||
- Dark red: < 0 dB (interference dominant — no service)
|
||||
- Orange: 0-10 dB (marginal)
|
||||
- Yellow: 10-20 dB (acceptable)
|
||||
- Green: 20-30 dB (good)
|
||||
- Blue: > 30 dB (excellent, minimal interference)
|
||||
- The C/I map only makes sense with 2+ sites on same frequency
|
||||
- Show warning if all sites are on different frequencies (no co-channel interference)
|
||||
|
||||
### API Response Extension
|
||||
Add `ci_ratio` field to coverage calculation response alongside existing `rsrp` values.
|
||||
|
||||
---
|
||||
|
||||
## Testing Checklist
|
||||
|
||||
### Link Budget
|
||||
- [ ] Panel opens from toolbar/sidebar
|
||||
- [ ] Click on map sets Rx point
|
||||
- [ ] Tx parameters auto-populate from selected site
|
||||
- [ ] Link budget table shows all rows correctly
|
||||
- [ ] Margin calculation is correct (manual verification)
|
||||
- [ ] Color coding: green for positive margin, red for negative
|
||||
- [ ] Line drawn on map from Tx to Rx
|
||||
|
||||
### Fresnel Zone
|
||||
- [ ] Terrain profile shows Fresnel zone overlay
|
||||
- [ ] Fresnel ellipse is widest at midpoint (correct shape)
|
||||
- [ ] Red highlighting where terrain enters Fresnel zone
|
||||
- [ ] Summary shows LOS/Fresnel status
|
||||
- [ ] Works at different frequencies (zone size changes with frequency)
|
||||
- [ ] Clearance values are reasonable (first Fresnel zone at 1800 MHz, 10km = ~22m radius at midpoint)
|
||||
|
||||
### Interference
|
||||
- [ ] C/I toggle appears when 2+ sites exist
|
||||
- [ ] C/I heatmap renders with correct color scale
|
||||
- [ ] Single-site scenario shows "no interference" or infinite C/I
|
||||
- [ ] Two sites on same frequency show interference zones between them
|
||||
- [ ] C/I values are reasonable (> 20 dB near serving cell, < 10 dB at cell edge)
|
||||
|
||||
## Build & Deploy
|
||||
|
||||
```bash
|
||||
cd D:\root\rfcp
|
||||
|
||||
# Backend — just restart uvicorn (Python, no build)
|
||||
cd backend
|
||||
python -m uvicorn app.main:app --host 0.0.0.0 --port 8000 --reload
|
||||
|
||||
# Frontend — rebuild if UI components changed
|
||||
cd frontend
|
||||
npm run build
|
||||
|
||||
# Full installer rebuild if needed
|
||||
# (use existing build script)
|
||||
```
|
||||
|
||||
## Commit Message
|
||||
|
||||
```
|
||||
feat(rf): add link budget, Fresnel zone, and interference modeling
|
||||
|
||||
- Add /api/link-budget endpoint with full path analysis
|
||||
- Add /api/fresnel-profile endpoint with zone clearance calculation
|
||||
- Add C/I ratio computation to GPU coverage pipeline
|
||||
- Add LinkBudgetPanel frontend component
|
||||
- Add Fresnel zone overlay to terrain profile chart
|
||||
- Add C/I heatmap toggle alongside RSRP display
|
||||
- Group interference by frequency for co-channel analysis
|
||||
```
|
||||
|
||||
## Success Criteria
|
||||
|
||||
1. Link budget shows correct margin for known test case (Dnipro, 10km, 1800MHz)
|
||||
2. Fresnel zone visually shows ellipse on terrain profile
|
||||
3. Two co-frequency sites show interference pattern between them
|
||||
4. All three features work with existing terrain data (no new downloads needed)
|
||||
5. GPU pipeline performance not significantly degraded by C/I calculation
|
||||
210
docs/devlog/gpu_supp/RFCP-3.10.1-UI-Bugfixes.md
Normal file
210
docs/devlog/gpu_supp/RFCP-3.10.1-UI-Bugfixes.md
Normal file
@@ -0,0 +1,210 @@
|
||||
# RFCP — Iteration 3.10.1: UI/UX Bugfixes
|
||||
|
||||
## Overview
|
||||
Four bugs found during 3.10 testing. All are frontend issues, no backend changes needed.
|
||||
|
||||
---
|
||||
|
||||
## Bug 1: Ruler places point when clicking Terrain Profile button
|
||||
|
||||
**Problem:** When Ruler mode is active and user clicks "Terrain Profile" button in the measurement overlay, it also places a ruler point on the map underneath. The click event propagates to the map.
|
||||
|
||||
**Fix:** Stop event propagation on the Terrain Profile button click handler. The Terrain Profile button (and any overlay UI elements) should call `e.stopPropagation()` to prevent the click from reaching the map layer.
|
||||
|
||||
Also review: any other UI overlays that sit on top of the map (Link Budget panel, coverage controls, etc.) should also stop propagation to prevent accidental ruler/site placement.
|
||||
|
||||
**Files to check:**
|
||||
- MeasurementTool component (Terrain Profile button handler)
|
||||
- Any overlay/popup components that sit on top of the Leaflet map
|
||||
|
||||
---
|
||||
|
||||
## Bug 2: Cursor should be default arrow, not hand; Ruler snap to site
|
||||
|
||||
**Problem A:** The map cursor shows as a grab/hand icon. Should be default arrow cursor for normal mode. Hand cursor should only appear when dragging the map.
|
||||
|
||||
**Fix A:** Set Leaflet map cursor styles:
|
||||
```css
|
||||
/* Default cursor */
|
||||
.leaflet-container {
|
||||
cursor: default !important;
|
||||
}
|
||||
|
||||
/* Grabbing only when dragging */
|
||||
.leaflet-container.leaflet-drag-target {
|
||||
cursor: grabbing !important;
|
||||
}
|
||||
|
||||
/* Crosshair for ruler mode */
|
||||
.leaflet-container.ruler-mode {
|
||||
cursor: crosshair !important;
|
||||
}
|
||||
|
||||
/* Crosshair for RX point placement mode */
|
||||
.leaflet-container.rx-placement-mode {
|
||||
cursor: crosshair !important;
|
||||
}
|
||||
```
|
||||
|
||||
Apply CSS classes to the map container based on current mode. Remove Leaflet's default grab cursor.
|
||||
|
||||
**Problem B:** When using the ruler, it should be possible to snap the ruler start/end point exactly to a site (tower) location. Currently you have to eyeball it.
|
||||
|
||||
**Fix B:** When in ruler mode and clicking near a site marker (within ~20px), snap the ruler point to the exact site coordinates. This gives precise distance measurements from tower to any point.
|
||||
|
||||
```typescript
|
||||
// In ruler click handler:
|
||||
const SNAP_DISTANCE_PX = 20;
|
||||
|
||||
function findNearestSite(clickLatLng: L.LatLng, map: L.Map): Site | null {
|
||||
const clickPoint = map.latLngToContainerPoint(clickLatLng);
|
||||
let nearest: Site | null = null;
|
||||
let minDist = Infinity;
|
||||
|
||||
for (const site of sites) {
|
||||
const sitePoint = map.latLngToContainerPoint(L.latLng(site.lat, site.lon));
|
||||
const dist = clickPoint.distanceTo(sitePoint);
|
||||
if (dist < SNAP_DISTANCE_PX && dist < minDist) {
|
||||
minDist = dist;
|
||||
nearest = site;
|
||||
}
|
||||
}
|
||||
return nearest;
|
||||
}
|
||||
|
||||
// When placing ruler point:
|
||||
const snappedSite = findNearestSite(clickLatLng, map);
|
||||
if (snappedSite) {
|
||||
// Use exact site coordinates
|
||||
rulerPoint = L.latLng(snappedSite.lat, snappedSite.lon);
|
||||
} else {
|
||||
rulerPoint = clickLatLng;
|
||||
}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Bug 3: Link Budget Calculator text invisible + RX point not placed on map
|
||||
|
||||
**Problem A:** Text in Link Budget Calculator panel is black on dark background — invisible. The input fields and labels need light text color for dark theme.
|
||||
|
||||
**Fix A:** Ensure all text in LinkBudgetPanel uses light colors:
|
||||
```css
|
||||
/* All text in the panel should be light */
|
||||
color: #e2e8f0; /* or whatever the app's light text color is */
|
||||
|
||||
/* Input fields */
|
||||
input {
|
||||
color: #e2e8f0;
|
||||
background: #1e293b; /* dark input background */
|
||||
border: 1px solid #475569;
|
||||
}
|
||||
|
||||
/* Labels */
|
||||
label {
|
||||
color: #94a3b8; /* slightly muted for labels */
|
||||
}
|
||||
|
||||
/* Values/results */
|
||||
.result-value {
|
||||
color: #f1f5f9; /* bright white for important values */
|
||||
}
|
||||
```
|
||||
|
||||
Check if the panel is using Tailwind classes — if so, ensure `text-slate-200` or similar is applied to the container. The panel likely inherits wrong text color or has hardcoded dark text.
|
||||
|
||||
**Problem B:** When clicking "Click on Map to Set RX Point" and then clicking on the map, the RX marker does not appear on the map. The coordinates might update in the fields but there's no visual indicator.
|
||||
|
||||
**Fix B:** When RX point is set:
|
||||
1. Place a visible marker on the map at the RX location (use a different icon than the TX site — e.g., a small circle or pin in a different color like orange or blue)
|
||||
2. Draw a dashed line from the TX site to the RX marker
|
||||
3. The marker should be draggable to adjust position
|
||||
4. When Link Budget panel is closed, remove the RX marker and line
|
||||
|
||||
```typescript
|
||||
// RX marker icon (different from site markers)
|
||||
const rxIcon = L.divIcon({
|
||||
className: 'rx-marker',
|
||||
html: '<div style="width: 12px; height: 12px; background: #f97316; border: 2px solid white; border-radius: 50%;"></div>',
|
||||
iconSize: [12, 12],
|
||||
iconAnchor: [6, 6],
|
||||
});
|
||||
|
||||
// Place marker
|
||||
const rxMarker = L.marker([rxLat, rxLon], { icon: rxIcon, draggable: true }).addTo(map);
|
||||
|
||||
// Dashed line from TX to RX
|
||||
const linkLine = L.polyline([[txLat, txLon], [rxLat, rxLon]], {
|
||||
color: '#f97316',
|
||||
weight: 2,
|
||||
dashArray: '8, 4',
|
||||
opacity: 0.8,
|
||||
}).addTo(map);
|
||||
|
||||
// Update on drag
|
||||
rxMarker.on('drag', (e) => {
|
||||
const pos = e.target.getLatLng();
|
||||
linkLine.setLatLngs([[txLat, txLon], [pos.lat, pos.lng]]);
|
||||
// Update Link Budget panel coordinates
|
||||
updateRxCoordinates(pos.lat, pos.lng);
|
||||
});
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Bug 4: Elevation color opacity not working
|
||||
|
||||
**Problem:** The opacity control for elevation/terrain colors on the map is not functioning. Adjusting the opacity slider has no effect on the terrain overlay visibility.
|
||||
|
||||
**Fix:** Check how the elevation overlay is rendered:
|
||||
|
||||
1. If it's a tile layer (Leaflet tile overlay), use `layer.setOpacity(value)`
|
||||
2. If it's the topo map layer, the opacity needs to be applied to the correct layer reference
|
||||
3. If it's the coverage heatmap opacity that's broken, check the canvas renderer opacity
|
||||
|
||||
The "Elev" button on the right toolbar likely toggles an elevation visualization. Find where this layer is created and ensure:
|
||||
|
||||
```typescript
|
||||
// When opacity slider changes:
|
||||
elevationLayer.setOpacity(opacityValue);
|
||||
|
||||
// Or if it's a canvas overlay:
|
||||
const canvas = document.querySelector('.elevation-overlay');
|
||||
if (canvas) {
|
||||
canvas.style.opacity = String(opacityValue);
|
||||
}
|
||||
```
|
||||
|
||||
Also check: there might be TWO opacity controls that are confused:
|
||||
- Coverage heatmap opacity (the RSRP colors)
|
||||
- Terrain/elevation color overlay opacity (the topo colors)
|
||||
|
||||
Make sure each slider controls the correct layer.
|
||||
|
||||
---
|
||||
|
||||
## Testing Checklist
|
||||
|
||||
- [ ] Click Terrain Profile button with Ruler active — NO extra ruler point placed
|
||||
- [ ] Default cursor is arrow, not hand
|
||||
- [ ] Cursor changes to crosshair in Ruler mode
|
||||
- [ ] Cursor changes to crosshair in RX placement mode
|
||||
- [ ] Ruler snaps to site when clicking near tower marker
|
||||
- [ ] Link Budget panel text is readable (light on dark)
|
||||
- [ ] Clicking map in RX mode places visible orange marker
|
||||
- [ ] Dashed line drawn from TX to RX
|
||||
- [ ] RX marker removed when panel closes
|
||||
- [ ] Elevation opacity slider actually changes overlay transparency
|
||||
|
||||
## Commit Message
|
||||
|
||||
```
|
||||
fix(ui): resolve ruler propagation, cursor, link budget visibility, elevation opacity
|
||||
|
||||
- Stop click propagation on Terrain Profile button (prevents ruler point)
|
||||
- Change default cursor to arrow, crosshair for tool modes
|
||||
- Add ruler snap-to-site (20px threshold)
|
||||
- Fix Link Budget panel text colors for dark theme
|
||||
- Add RX marker and dashed line on map
|
||||
- Fix elevation overlay opacity control binding
|
||||
```
|
||||
349
docs/devlog/gpu_supp/RFCP-3.10.2-ToolMode-ClickFixes.md
Normal file
349
docs/devlog/gpu_supp/RFCP-3.10.2-ToolMode-ClickFixes.md
Normal file
@@ -0,0 +1,349 @@
|
||||
# RFCP — Iteration 3.10.2: Tool Mode System & Click Fixes
|
||||
|
||||
## Root Cause
|
||||
All click-related bugs share one root cause: multiple features compete for the same map click event. Ruler, RX point placement, site placement, and terrain profile all listen to map clicks simultaneously. There's no centralized "active tool" state that prevents conflicts.
|
||||
|
||||
## Solution: Active Tool Mode
|
||||
Create a single source of truth for which tool is currently active. Only the active tool receives map click events.
|
||||
|
||||
### Tool Modes (mutually exclusive):
|
||||
```typescript
|
||||
type ActiveTool =
|
||||
| 'none' // Default — pan/zoom only, no click actions
|
||||
| 'ruler' // Distance measurement, click to add points
|
||||
| 'rx-placement' // Link Budget RX point, single click
|
||||
| 'site-placement' // Place new site on map
|
||||
```
|
||||
|
||||
### Implementation
|
||||
|
||||
**1. Add to app store (Zustand):**
|
||||
|
||||
```typescript
|
||||
// In the main store or a new toolStore:
|
||||
interface ToolState {
|
||||
activeTool: ActiveTool;
|
||||
setActiveTool: (tool: ActiveTool) => void;
|
||||
clearTool: () => void;
|
||||
}
|
||||
|
||||
const useToolStore = create<ToolState>((set) => ({
|
||||
activeTool: 'none',
|
||||
setActiveTool: (tool) => set({ activeTool: tool }),
|
||||
clearTool: () => set({ activeTool: 'none' }),
|
||||
}));
|
||||
```
|
||||
|
||||
**2. Map click handler — single entry point:**
|
||||
|
||||
Replace all individual map click listeners with ONE handler:
|
||||
|
||||
```typescript
|
||||
// In the main Map component:
|
||||
map.on('click', (e: L.LeafletMouseEvent) => {
|
||||
const { activeTool } = useToolStore.getState();
|
||||
|
||||
switch (activeTool) {
|
||||
case 'ruler':
|
||||
handleRulerClick(e);
|
||||
break;
|
||||
case 'rx-placement':
|
||||
handleRxPlacement(e);
|
||||
break;
|
||||
case 'site-placement':
|
||||
handleSitePlacement(e);
|
||||
break;
|
||||
case 'none':
|
||||
default:
|
||||
// No action on map click — just pan/zoom
|
||||
break;
|
||||
}
|
||||
});
|
||||
```
|
||||
|
||||
**3. Cursor changes based on active tool:**
|
||||
|
||||
```typescript
|
||||
useEffect(() => {
|
||||
const container = map.getContainer();
|
||||
// Remove all tool cursors
|
||||
container.classList.remove('ruler-mode', 'rx-placement-mode', 'site-placement-mode');
|
||||
|
||||
switch (activeTool) {
|
||||
case 'ruler':
|
||||
container.classList.add('ruler-mode');
|
||||
break;
|
||||
case 'rx-placement':
|
||||
container.classList.add('rx-placement-mode');
|
||||
break;
|
||||
case 'site-placement':
|
||||
container.classList.add('site-placement-mode');
|
||||
break;
|
||||
default:
|
||||
// Default cursor (arrow)
|
||||
break;
|
||||
}
|
||||
}, [activeTool]);
|
||||
```
|
||||
|
||||
**4. CSS for cursors:**
|
||||
|
||||
```css
|
||||
.leaflet-container {
|
||||
cursor: default !important;
|
||||
}
|
||||
|
||||
.leaflet-container.leaflet-dragging {
|
||||
cursor: grabbing !important;
|
||||
}
|
||||
|
||||
.leaflet-container.ruler-mode {
|
||||
cursor: crosshair !important;
|
||||
}
|
||||
|
||||
.leaflet-container.rx-placement-mode {
|
||||
cursor: crosshair !important;
|
||||
}
|
||||
|
||||
.leaflet-container.site-placement-mode {
|
||||
cursor: cell !important;
|
||||
}
|
||||
```
|
||||
|
||||
**5. UI buttons toggle tool mode:**
|
||||
|
||||
```typescript
|
||||
// Ruler button:
|
||||
const handleRulerToggle = () => {
|
||||
if (activeTool === 'ruler') {
|
||||
clearTool(); // Toggle off
|
||||
} else {
|
||||
setActiveTool('ruler'); // Activate ruler, deactivate others
|
||||
}
|
||||
};
|
||||
|
||||
// Link Budget "Click on Map to Set RX Point" button:
|
||||
const handleRxModeToggle = () => {
|
||||
if (activeTool === 'rx-placement') {
|
||||
clearTool();
|
||||
} else {
|
||||
setActiveTool('rx-placement');
|
||||
}
|
||||
};
|
||||
```
|
||||
|
||||
**6. Auto-deactivation:**
|
||||
- RX placement: deactivate after single click (point is set)
|
||||
- Ruler: stays active until toggled off or right-click finishes
|
||||
- Site placement: deactivate after placing site
|
||||
|
||||
---
|
||||
|
||||
## Fix: Ruler Snap to Site
|
||||
|
||||
In the ruler click handler, check proximity to existing sites:
|
||||
|
||||
```typescript
|
||||
function handleRulerClick(e: L.LeafletMouseEvent) {
|
||||
const map = e.target;
|
||||
const clickPoint = map.latLngToContainerPoint(e.latlng);
|
||||
const SNAP_THRESHOLD_PX = 20;
|
||||
|
||||
// Check all site markers
|
||||
let snappedLatLng = e.latlng;
|
||||
let snapped = false;
|
||||
|
||||
for (const site of sites) {
|
||||
const siteLatLng = L.latLng(site.lat, site.lon);
|
||||
const sitePoint = map.latLngToContainerPoint(siteLatLng);
|
||||
const pixelDist = clickPoint.distanceTo(sitePoint);
|
||||
|
||||
if (pixelDist < SNAP_THRESHOLD_PX) {
|
||||
snappedLatLng = siteLatLng;
|
||||
snapped = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
// Add ruler point at snapped or original location
|
||||
addRulerPoint(snappedLatLng);
|
||||
|
||||
// Optional: visual feedback for snap
|
||||
if (snapped) {
|
||||
// Brief highlight on the site marker
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Fix: RX Point Placement + Visual Marker
|
||||
|
||||
When in 'rx-placement' mode and map is clicked:
|
||||
|
||||
```typescript
|
||||
function handleRxPlacement(e: L.LeafletMouseEvent) {
|
||||
const { lat, lng } = e.latlng;
|
||||
|
||||
// Update Link Budget panel coordinates
|
||||
setRxCoordinates(lat, lng);
|
||||
|
||||
// Place visible marker on map
|
||||
if (rxMarkerRef.current) {
|
||||
rxMarkerRef.current.setLatLng([lat, lng]);
|
||||
} else {
|
||||
rxMarkerRef.current = L.marker([lat, lng], {
|
||||
icon: L.divIcon({
|
||||
className: 'rx-point-marker',
|
||||
html: `<div style="
|
||||
width: 14px; height: 14px;
|
||||
background: #f97316;
|
||||
border: 2px solid #fff;
|
||||
border-radius: 50%;
|
||||
box-shadow: 0 0 6px rgba(249,115,22,0.6);
|
||||
"></div>`,
|
||||
iconSize: [14, 14],
|
||||
iconAnchor: [7, 7],
|
||||
}),
|
||||
draggable: true,
|
||||
}).addTo(map);
|
||||
|
||||
// Update coords on drag
|
||||
rxMarkerRef.current.on('drag', (ev) => {
|
||||
const pos = ev.target.getLatLng();
|
||||
setRxCoordinates(pos.lat, pos.lng);
|
||||
});
|
||||
}
|
||||
|
||||
// Draw dashed line from TX to RX
|
||||
const selectedSite = getSelectedSite();
|
||||
if (selectedSite && linkLineRef.current) {
|
||||
linkLineRef.current.setLatLngs([[selectedSite.lat, selectedSite.lon], [lat, lng]]);
|
||||
} else if (selectedSite) {
|
||||
linkLineRef.current = L.polyline(
|
||||
[[selectedSite.lat, selectedSite.lon], [lat, lng]],
|
||||
{ color: '#f97316', weight: 2, dashArray: '8,4', opacity: 0.8 }
|
||||
).addTo(map);
|
||||
}
|
||||
|
||||
// Deactivate RX placement mode (single click action)
|
||||
clearTool();
|
||||
}
|
||||
|
||||
// Cleanup when Link Budget panel closes:
|
||||
function cleanupRxMarker() {
|
||||
if (rxMarkerRef.current) {
|
||||
rxMarkerRef.current.remove();
|
||||
rxMarkerRef.current = null;
|
||||
}
|
||||
if (linkLineRef.current) {
|
||||
linkLineRef.current.remove();
|
||||
linkLineRef.current = null;
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Fix: Terrain Profile Click-Through
|
||||
|
||||
The Terrain Profile popup and its "Terrain Profile" trigger button must stop event propagation:
|
||||
|
||||
```typescript
|
||||
// On the Terrain Profile button in the measurement overlay:
|
||||
<button
|
||||
onClick={(e) => {
|
||||
e.stopPropagation();
|
||||
e.preventDefault();
|
||||
showTerrainProfile();
|
||||
}}
|
||||
onMouseDown={(e) => e.stopPropagation()}
|
||||
onPointerDown={(e) => e.stopPropagation()}
|
||||
>
|
||||
Terrain Profile
|
||||
</button>
|
||||
|
||||
// On the Terrain Profile popup container:
|
||||
<div
|
||||
className="terrain-profile-popup"
|
||||
onClick={(e) => e.stopPropagation()}
|
||||
onMouseDown={(e) => e.stopPropagation()}
|
||||
onPointerDown={(e) => e.stopPropagation()}
|
||||
>
|
||||
{/* ... chart content ... */}
|
||||
</div>
|
||||
```
|
||||
|
||||
Also ensure the popup/panel has `pointer-events: auto` and is positioned with a high z-index above the map.
|
||||
|
||||
With the tool mode system in place, this becomes less critical since clicking terrain profile UI won't trigger ruler (ruler mode would be separate), but stopping propagation is still good practice.
|
||||
|
||||
---
|
||||
|
||||
## Fix: Default Cursor (Not Hand)
|
||||
|
||||
Override Leaflet's default grab cursor:
|
||||
|
||||
```css
|
||||
/* Global override in the app's main CSS */
|
||||
.leaflet-container {
|
||||
cursor: default !important;
|
||||
}
|
||||
|
||||
/* Only show grab when actually dragging */
|
||||
.leaflet-container.leaflet-dragging,
|
||||
.leaflet-container:active {
|
||||
cursor: grabbing !important;
|
||||
}
|
||||
|
||||
/* Remove grab cursor from interactive layers too */
|
||||
.leaflet-interactive {
|
||||
cursor: default !important;
|
||||
}
|
||||
|
||||
/* Tool-specific cursors applied via JS class toggle */
|
||||
.leaflet-container.tool-ruler {
|
||||
cursor: crosshair !important;
|
||||
}
|
||||
|
||||
.leaflet-container.tool-rx-placement {
|
||||
cursor: crosshair !important;
|
||||
}
|
||||
|
||||
.leaflet-container.tool-site-placement {
|
||||
cursor: cell !important;
|
||||
}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Testing Checklist
|
||||
|
||||
- [ ] Only ONE tool can be active at a time
|
||||
- [ ] Activating Ruler deactivates RX placement and vice versa
|
||||
- [ ] Default cursor is arrow (not hand/grab)
|
||||
- [ ] Cursor changes to crosshair when Ruler is active
|
||||
- [ ] Cursor changes to crosshair when RX placement is active
|
||||
- [ ] Cursor shows grabbing only when dragging map
|
||||
- [ ] Clicking Terrain Profile button does NOT place ruler point
|
||||
- [ ] Clicking any UI panel/popup does NOT place ruler point
|
||||
- [ ] Ruler point snaps to site marker when clicking within 20px
|
||||
- [ ] RX point click places orange marker on map
|
||||
- [ ] Dashed orange line appears from TX site to RX marker
|
||||
- [ ] RX marker is draggable (updates coordinates in panel)
|
||||
- [ ] RX marker removed when Link Budget panel closes
|
||||
- [ ] Right-click finishes ruler measurement
|
||||
|
||||
## Commit Message
|
||||
|
||||
```
|
||||
fix(tools): implement active tool mode system, fix click conflicts
|
||||
|
||||
- Add ActiveTool state (none/ruler/rx-placement/site-placement)
|
||||
- Single map click handler dispatches to active tool only
|
||||
- Fix cursor: default arrow, crosshair for tools, grabbing for drag
|
||||
- Add ruler snap-to-site (20px threshold)
|
||||
- Add RX marker with draggable orange dot and dashed line
|
||||
- Stop event propagation on all UI overlays above map
|
||||
- Clean up markers when panels close
|
||||
```
|
||||
106
docs/devlog/gpu_supp/RFCP-3.10.3-Calculator-Ruler-UX.md
Normal file
106
docs/devlog/gpu_supp/RFCP-3.10.3-Calculator-Ruler-UX.md
Normal file
@@ -0,0 +1,106 @@
|
||||
# RFCP — Iteration 3.10.3: Calculator Shortcut & Ruler Limit
|
||||
|
||||
## Two small UX changes, no backend.
|
||||
|
||||
---
|
||||
|
||||
## 1. Link Budget Calculator — Quick Access Button
|
||||
|
||||
Move calculator access to a visible toolbar button, not buried in Map Tools panel.
|
||||
|
||||
**Location:** Top-left corner of the map, below the zoom controls (+/- buttons). Similar to how Fit, Reset, Topo, Grid, Ruler, Elev buttons are in the top-right.
|
||||
|
||||
**Implementation:**
|
||||
Add a button to the left toolbar (or create a small floating button group):
|
||||
|
||||
```typescript
|
||||
// Top-left button, below zoom controls
|
||||
<button
|
||||
className="map-tool-btn"
|
||||
onClick={() => setShowLinkBudget(!showLinkBudget)}
|
||||
title="Link Budget Calculator"
|
||||
>
|
||||
{/* Calculator icon — use an emoji or SVG */}
|
||||
🔗 {/* or a small "LB" text label, or a calculator SVG icon */}
|
||||
</button>
|
||||
```
|
||||
|
||||
**Styling:** Same visual style as the right-side tool buttons (Fit, Reset, Topo, Grid, Ruler, Elev) — dark rounded rectangle with light text/icon.
|
||||
|
||||
**Position options (pick one):**
|
||||
- **Option A:** Add to the RIGHT toolbar stack below "Elev" button — keeps all tools together
|
||||
- **Option B:** Floating button top-left below zoom — separate but prominent
|
||||
- **Option C:** Add to the measurement overlay bar (near the ruler distance display)
|
||||
|
||||
Recommend **Option A** — add "LB" or calculator icon button to the right toolbar stack, below Elev. Consistent with existing UI pattern.
|
||||
|
||||
Also: Remove the "Hide Link Budget Calculator" button from Map Tools panel (or keep it as secondary toggle — but primary access should be the toolbar button).
|
||||
|
||||
---
|
||||
|
||||
## 2. Ruler — Maximum 2 Points Only
|
||||
|
||||
**Problem:** Ruler currently allows unlimited points, creating a web of measurement lines. For RF point-to-point measurement, only 2 points make sense: start and end.
|
||||
|
||||
**Fix:** Limit ruler to exactly 2 points. When both points are placed, the measurement is complete. To start a new measurement, clicking again replaces the first point and clears the old measurement.
|
||||
|
||||
```typescript
|
||||
// In the map click handler for ruler mode:
|
||||
function handleRulerClick(e: L.LeafletMouseEvent) {
|
||||
const currentPoints = rulerPoints;
|
||||
|
||||
if (currentPoints.length === 0) {
|
||||
// First point
|
||||
setRulerPoints([snappedLatLng]);
|
||||
} else if (currentPoints.length === 1) {
|
||||
// Second point — measurement complete
|
||||
setRulerPoints([currentPoints[0], snappedLatLng]);
|
||||
// Optionally: auto-deactivate ruler mode after 2nd point
|
||||
// clearTool(); // uncomment if you want one-shot behavior
|
||||
} else {
|
||||
// Already 2 points — start new measurement
|
||||
// Replace: clear old points, start fresh with new first point
|
||||
setRulerPoints([snappedLatLng]);
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**Behavior:**
|
||||
1. Click 1: Place start point (show marker)
|
||||
2. Click 2: Place end point (show marker + line + distance label + Terrain Profile button)
|
||||
3. Click 3: Clear previous, start new measurement from this click
|
||||
4. Right-click or Escape: Cancel/clear ruler entirely
|
||||
|
||||
**Remove:**
|
||||
- Remove "Right-click to finish" instruction (no longer needed — measurement auto-completes at 2 points)
|
||||
- Remove multi-point polyline rendering (only single line between 2 points)
|
||||
|
||||
**Visual:**
|
||||
- Show a single straight line between 2 points (green dashed, as current)
|
||||
- Distance label at midpoint
|
||||
- Terrain Profile button appears after 2nd point is placed
|
||||
- Small circle markers at both endpoints
|
||||
|
||||
---
|
||||
|
||||
## Testing Checklist
|
||||
|
||||
- [ ] Calculator button visible in toolbar (right side, below Elev)
|
||||
- [ ] Click calculator button opens/closes Link Budget panel
|
||||
- [ ] Ruler allows exactly 2 points, no more
|
||||
- [ ] Third click starts new measurement (replaces old)
|
||||
- [ ] Escape clears ruler
|
||||
- [ ] Distance + Terrain Profile button appears after 2nd point
|
||||
- [ ] No multi-point web/polygon possible
|
||||
- [ ] Ruler still snaps to site markers
|
||||
|
||||
## Commit Message
|
||||
|
||||
```
|
||||
fix(ux): add calculator toolbar button, limit ruler to 2 points
|
||||
|
||||
- Add Link Budget Calculator button to right toolbar
|
||||
- Limit ruler to exactly 2 points (point-to-point only)
|
||||
- Third click starts new measurement, clears previous
|
||||
- Remove multi-point polyline behavior
|
||||
```
|
||||
136
docs/devlog/gpu_supp/RFCP-3.10.4-TerrainClick-TxHeight.md
Normal file
136
docs/devlog/gpu_supp/RFCP-3.10.4-TerrainClick-TxHeight.md
Normal file
@@ -0,0 +1,136 @@
|
||||
# RFCP — Iteration 3.10.4: Terrain Profile Click Fix & TX Height
|
||||
|
||||
## Two bugs remaining from previous iterations.
|
||||
|
||||
---
|
||||
|
||||
## Bug 1: Terrain Profile click still places ruler point
|
||||
|
||||
**Problem:** Clicking inside the Terrain Profile popup (chart area, close button, fresnel checkbox, anywhere in the popup) triggers the map click handler underneath, which places a ruler point or resets the measurement.
|
||||
|
||||
**Previous fix was incomplete** — stopPropagation was added to some elements but not the entire popup container and its backdrop.
|
||||
|
||||
**Fix:** The Terrain Profile popup needs a FULL click barrier. Every mouse event must be caught:
|
||||
|
||||
```typescript
|
||||
// The OUTERMOST container of the Terrain Profile popup:
|
||||
<div
|
||||
className="terrain-profile-container"
|
||||
onClick={(e) => { e.stopPropagation(); e.nativeEvent.stopImmediatePropagation(); }}
|
||||
onMouseDown={(e) => { e.stopPropagation(); e.nativeEvent.stopImmediatePropagation(); }}
|
||||
onMouseUp={(e) => { e.stopPropagation(); e.nativeEvent.stopImmediatePropagation(); }}
|
||||
onPointerDown={(e) => { e.stopPropagation(); e.nativeEvent.stopImmediatePropagation(); }}
|
||||
onPointerUp={(e) => { e.stopPropagation(); e.nativeEvent.stopImmediatePropagation(); }}
|
||||
onDoubleClick={(e) => { e.stopPropagation(); e.nativeEvent.stopImmediatePropagation(); }}
|
||||
>
|
||||
{/* All terrain profile content */}
|
||||
</div>
|
||||
```
|
||||
|
||||
**IMPORTANT:** `stopPropagation()` alone may not be enough because Leaflet listens to DOM events directly, not React synthetic events. The fix MUST also call `e.nativeEvent.stopImmediatePropagation()` to prevent Leaflet's native DOM listener from firing.
|
||||
|
||||
**Alternative approach (more robust):** Add the popup OUTSIDE the Leaflet map container in the DOM tree. If the Terrain Profile div is a sibling or parent of the map div (not a child), Leaflet's event delegation won't catch clicks on it at all.
|
||||
|
||||
```tsx
|
||||
// In the main layout:
|
||||
<div className="app-layout">
|
||||
<div id="map-container">
|
||||
{/* Leaflet map renders here */}
|
||||
</div>
|
||||
|
||||
{/* These are OUTSIDE the map container — Leaflet can't intercept */}
|
||||
{showTerrainProfile && (
|
||||
<TerrainProfile ... />
|
||||
)}
|
||||
{showLinkBudget && (
|
||||
<LinkBudgetPanel ... />
|
||||
)}
|
||||
</div>
|
||||
```
|
||||
|
||||
If moving outside the map container is too much refactoring, the stopImmediatePropagation approach should work. But check: is the TerrainProfile component rendered INSIDE a Leaflet pane or overlay? If so, moving it out is the correct fix.
|
||||
|
||||
**Also apply the same fix to:**
|
||||
- Link Budget Calculator panel
|
||||
- Any other floating panel/popup that sits over the map
|
||||
|
||||
---
|
||||
|
||||
## Bug 2: TX Height always shows 2m in Link Budget Calculator
|
||||
|
||||
**Problem:** The Link Budget Calculator TRANSMITTER section always shows `Height: 2m` regardless of the actual site configuration. It should read the height from the selected site's settings.
|
||||
|
||||
**Root cause:** The LinkBudgetPanel component likely reads `site.height` but the site object might store height in a different field name (e.g., `site.antennaHeight`, `site.towerHeight`, `site.params.height`, or per-sector height).
|
||||
|
||||
**Fix:** Find where site height is stored and pass the correct value:
|
||||
|
||||
```typescript
|
||||
// In LinkBudgetPanel.tsx, find where TX height is set:
|
||||
// WRONG (probably current):
|
||||
const txHeight = site.height || 2; // Defaults to 2 if field is missing
|
||||
|
||||
// Check the actual site data structure. It might be:
|
||||
const txHeight = site.antennaHeight
|
||||
|| site.tower_height
|
||||
|| site.params?.height
|
||||
|| site.sectors?.[0]?.height // If height is per-sector
|
||||
|| 30; // Default should be 30m for a typical cell tower, not 2m
|
||||
|
||||
// Or if height is stored in meters in a nested config:
|
||||
const txHeight = selectedSite?.config?.height || selectedSite?.height || 30;
|
||||
```
|
||||
|
||||
**Steps to debug:**
|
||||
1. In the browser console (F12), find the selected site object
|
||||
2. Check what field contains the height value
|
||||
3. Update LinkBudgetPanel to read from the correct field
|
||||
|
||||
**Display fix:**
|
||||
```typescript
|
||||
// In the TRANSMITTER section of the panel:
|
||||
<div className="param-row">
|
||||
<span>Height:</span>
|
||||
<span>{txHeight} m</span>
|
||||
</div>
|
||||
```
|
||||
|
||||
The height should also be EDITABLE in the link budget calculator (as an input field, not just display), since you might want to test "what if I put the antenna at 40m instead of 30m?" without changing the actual site config.
|
||||
|
||||
```typescript
|
||||
// Make height an editable field with site value as default:
|
||||
const [txHeightOverride, setTxHeightOverride] = useState<number | null>(null);
|
||||
const txHeight = txHeightOverride ?? (site?.height || 30);
|
||||
|
||||
<div className="param-row">
|
||||
<label>Height:</label>
|
||||
<input
|
||||
type="number"
|
||||
value={txHeight}
|
||||
onChange={(e) => setTxHeightOverride(parseFloat(e.target.value))}
|
||||
/> m
|
||||
</div>
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Testing Checklist
|
||||
|
||||
- [ ] Click ANYWHERE inside Terrain Profile popup — NO ruler point placed
|
||||
- [ ] Click Terrain Profile close button (X) — popup closes, no ruler point
|
||||
- [ ] Click Fresnel Zone checkbox — toggles, no ruler point
|
||||
- [ ] Click chart area — no ruler point
|
||||
- [ ] Drag/scroll inside chart — no map pan/zoom
|
||||
- [ ] TX Height in Link Budget shows actual site height (not 2m)
|
||||
- [ ] TX Height is editable for what-if scenarios
|
||||
- [ ] Changing TX height recalculates link budget
|
||||
|
||||
## Commit Message
|
||||
|
||||
```
|
||||
fix(ui): block all click propagation from terrain profile, fix TX height
|
||||
|
||||
- Add stopImmediatePropagation on terrain profile container
|
||||
- Prevent all mouse/pointer events from reaching Leaflet map
|
||||
- Fix TX height reading from site config (was defaulting to 2m)
|
||||
- Make TX height editable in link budget calculator
|
||||
```
|
||||
130
docs/devlog/gpu_supp/RFCP-3.6.0-GPU-Build-Task.md
Normal file
130
docs/devlog/gpu_supp/RFCP-3.6.0-GPU-Build-Task.md
Normal file
@@ -0,0 +1,130 @@
|
||||
# RFCP 3.6.0 — Production GPU Build (Claude Code Task)
|
||||
|
||||
## Goal
|
||||
|
||||
Build `rfcp-server.exe` (PyInstaller) with CuPy GPU support so production RFCP
|
||||
detects the NVIDIA GPU without manual `pip install`.
|
||||
|
||||
Currently production exe shows "CPU (NumPy)" because CuPy is not bundled.
|
||||
|
||||
## Current Environment (CONFIRMED WORKING)
|
||||
|
||||
```
|
||||
Windows 10 (10.0.26200)
|
||||
Python 3.11.8 (C:\Python311)
|
||||
NVIDIA GeForce RTX 4060 Laptop GPU (8 GB VRAM)
|
||||
CUDA Toolkit 13.1 (C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v13.1)
|
||||
CUDA_PATH = C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v13.1
|
||||
|
||||
Packages:
|
||||
cupy-cuda13x 13.6.0 ← NOT cuda12x!
|
||||
numpy 1.26.4
|
||||
scipy 1.17.0
|
||||
fastrlock 0.8.3
|
||||
pyinstaller 6.18.0
|
||||
|
||||
GPU compute verified:
|
||||
python -c "import cupy; a = cupy.array([1,2,3]); print(a.sum())" → 6 ✅
|
||||
```
|
||||
|
||||
## What We Already Tried (And Why It Failed)
|
||||
|
||||
### Attempt 1: ONEFILE spec with collect_all('cupy')
|
||||
- `collect_all('cupy')` returns 1882 datas, **0 binaries** — CuPy pip doesn't bundle DLLs on Windows
|
||||
- CUDA DLLs come from two separate sources:
|
||||
- **nvidia pip packages** (14 DLLs in `C:\Python311\Lib\site-packages\nvidia\*/bin/`)
|
||||
- **CUDA Toolkit** (13 DLLs in `C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v13.1\bin\x64\`)
|
||||
- We manually collected these 27 DLLs in the spec
|
||||
- Build succeeded (3 GB exe!) but crashed on launch:
|
||||
```
|
||||
[PYI-10456:ERROR] Failed to extract cufft64_12.dll: decompression resulted in return code -1!
|
||||
```
|
||||
- Root cause: `cufft64_12.dll` is 297 MB — PyInstaller's zlib compression fails on it in ONEFILE mode
|
||||
|
||||
### Attempt 2: We were about to try ONEDIR but haven't built it yet
|
||||
|
||||
### Key Insight: Duplicate DLLs from two sources
|
||||
nvidia pip packages have CUDA 12.x DLLs (cublas64_12.dll etc.)
|
||||
CUDA Toolkit 13.1 has CUDA 13.x DLLs (cublas64_13.dll etc.)
|
||||
CuPy-cuda13x needs the 13.x versions. The 12.x from pip may conflict.
|
||||
|
||||
## What Needs To Happen
|
||||
|
||||
1. **Build rfcp-server as ONEDIR** (folder with exe + DLLs, not single exe)
|
||||
- This avoids the decompression crash with large CUDA DLLs
|
||||
- Output: `backend/dist/rfcp-server/rfcp-server.exe` + all DLLs alongside
|
||||
|
||||
2. **Include ONLY the correct CUDA DLLs**
|
||||
- Prefer CUDA Toolkit 13.1 DLLs (match cupy-cuda13x)
|
||||
- The nvidia pip packages have cuda12x DLLs — may cause version conflicts
|
||||
- Key DLLs needed: cublas, cusparse, cusolver, curand, cufft, nvrtc, cudart
|
||||
|
||||
3. **Exclude bloat** — the previous build pulled in tensorflow, grpc, opentelemetry etc.
|
||||
making it 3 GB. Real size should be ~600-800 MB.
|
||||
|
||||
4. **Test the built exe** — run it standalone and verify:
|
||||
- `curl http://localhost:8090/api/health` returns `"build": "gpu"`
|
||||
- `curl http://localhost:8090/api/gpu/status` returns `"available": true`
|
||||
- Or at minimum: the exe starts without errors and CuPy imports successfully
|
||||
|
||||
5. **Update Electron integration** if needed:
|
||||
- Current Electron expects a single `rfcp-server.exe` file
|
||||
- With ONEDIR, it's a folder `rfcp-server/rfcp-server.exe`
|
||||
- File: `desktop/main.js` or `desktop/src/main.ts` — look for where it spawns backend
|
||||
- The path needs to change from `resources/backend/rfcp-server.exe`
|
||||
to `resources/backend/rfcp-server/rfcp-server.exe`
|
||||
|
||||
## File Locations
|
||||
|
||||
```
|
||||
D:\root\rfcp\
|
||||
├── backend\
|
||||
│ ├── run_server.py ← PyInstaller entry point
|
||||
│ ├── app\
|
||||
│ │ ├── main.py ← FastAPI app
|
||||
│ │ ├── services\
|
||||
│ │ │ ├── gpu_backend.py ← GPU detection (CuPy/NumPy fallback)
|
||||
│ │ │ └── coverage_service.py ← Uses get_array_module()
|
||||
│ │ └── api\routes\gpu.py ← /api/gpu/status, /api/gpu/diagnostics
|
||||
│ ├── dist\ ← PyInstaller output goes here
|
||||
│ └── build\ ← PyInstaller build cache
|
||||
├── installer\
|
||||
│ ├── rfcp-server-gpu.spec ← GPU spec (needs fixing)
|
||||
│ ├── rfcp-server.spec ← CPU spec (working, don't touch)
|
||||
│ ├── rfcp.ico ← Icon (exists)
|
||||
│ └── build-gpu.bat ← Build script
|
||||
├── desktop\
|
||||
│ ├── main.js or src/main.ts ← Electron main process
|
||||
│ └── resources\backend\ ← Where production exe lives
|
||||
└── frontend\ ← React frontend (no changes needed)
|
||||
```
|
||||
|
||||
## Existing CPU spec for reference
|
||||
|
||||
The working CPU-only spec is at `installer/rfcp-server.spec`. Use it as the base
|
||||
and ADD CuPy + CUDA on top. Don't reinvent the wheel.
|
||||
|
||||
## Build Command
|
||||
|
||||
```powershell
|
||||
cd D:\root\rfcp\backend
|
||||
pyinstaller ..\installer\rfcp-server-gpu.spec --clean --noconfirm
|
||||
```
|
||||
|
||||
## Success Criteria
|
||||
|
||||
- [ ] `dist/rfcp-server/rfcp-server.exe` starts without errors
|
||||
- [ ] CuPy imports successfully inside the exe (no missing DLL errors)
|
||||
- [ ] `/api/gpu/status` returns `"available": true, "device": "RTX 4060"`
|
||||
- [ ] Total folder size < 1 GB (ideally 600-800 MB)
|
||||
- [ ] No tensorflow/grpc/opentelemetry bloat
|
||||
- [ ] Electron can find and launch the backend (path updated if needed)
|
||||
|
||||
## Important Notes
|
||||
|
||||
- Do NOT use cupy-cuda12x — we migrated to cupy-cuda13x
|
||||
- Do NOT try ONEFILE mode — cufft64_12.dll (297 MB) crashes decompression
|
||||
- The nvidia pip packages (nvidia-cublas-cu12, etc.) are still installed but may
|
||||
conflict with CUDA Toolkit 13.1 — prefer Toolkit DLLs
|
||||
- `collect_all('cupy')` gives 0 binaries on Windows — DLLs must be manually specified
|
||||
- gpu_backend.py already handles CuPy absence gracefully (falls back to NumPy)
|
||||
133
docs/devlog/gpu_supp/RFCP-3.7.0-GPU-Coverage-Task.md
Normal file
133
docs/devlog/gpu_supp/RFCP-3.7.0-GPU-Coverage-Task.md
Normal file
@@ -0,0 +1,133 @@
|
||||
# RFCP 3.7.0 — GPU-Accelerated Coverage Calculations
|
||||
|
||||
## Context
|
||||
|
||||
Iteration 3.6.0 completed: CuPy-cuda13x works in production PyInstaller build,
|
||||
RTX 4060 detected, ONEDIR build with CUDA DLLs. BUT coverage calculations still
|
||||
run on CPU because coverage_service.py uses `import numpy as np` directly instead
|
||||
of the GPU backend.
|
||||
|
||||
The GPU infrastructure is ready:
|
||||
- `app/services/gpu_backend.py` has `GPUManager.get_array_module()` → returns cupy or numpy
|
||||
- `/api/gpu/status` confirms `"active_backend": "cuda"`
|
||||
- CuPy is imported and GPU detected in the frozen exe
|
||||
|
||||
## Goal
|
||||
|
||||
Replace direct `np.` calls in coverage_service.py with `xp = gpu_manager.get_array_module()`
|
||||
so calculations run on GPU when available, with automatic NumPy fallback.
|
||||
|
||||
## Files to Modify
|
||||
|
||||
### `app/services/coverage_service.py`
|
||||
|
||||
**Line 7**: `import numpy as np` — keep this but also import gpu_manager
|
||||
|
||||
Add near top:
|
||||
```python
|
||||
from app.services.gpu_backend import gpu_manager
|
||||
```
|
||||
|
||||
**Key sections to GPU-accelerate** (highest impact first):
|
||||
|
||||
#### 1. Grid array creation (lines 549-550, 922-923)
|
||||
```python
|
||||
# BEFORE:
|
||||
grid_lats = np.array([lat for lat, lon in grid])
|
||||
grid_lons = np.array([lon for lat, lon in grid])
|
||||
|
||||
# AFTER:
|
||||
xp = gpu_manager.get_array_module()
|
||||
grid_lats = xp.array([lat for lat, lon in grid])
|
||||
grid_lons = xp.array([lon for lat, lon in grid])
|
||||
```
|
||||
|
||||
#### 2. Trig calculations (line 468, 1031, 1408-1415, 1442)
|
||||
These use np.cos, np.radians, np.sin, np.degrees, np.arctan2 — all have CuPy equivalents.
|
||||
```python
|
||||
# BEFORE:
|
||||
lon_delta = settings.radius / (111000 * np.cos(np.radians(center_lat)))
|
||||
cos_lat = np.cos(np.radians(center_lat))
|
||||
|
||||
# AFTER:
|
||||
xp = gpu_manager.get_array_module()
|
||||
lon_delta = settings.radius / (111000 * float(xp.cos(xp.radians(center_lat))))
|
||||
cos_lat = float(xp.cos(xp.radians(center_lat)))
|
||||
```
|
||||
|
||||
#### 3. The heavy calculation loop — `_run_point_loop` (line 1070) and `_calculate_point_sync` (line 1112)
|
||||
This is where 90% of time is spent. Currently processes points one-by-one.
|
||||
The GPU win comes from vectorizing the path loss calculation across ALL grid points at once.
|
||||
|
||||
**Strategy**: Instead of looping through points, create arrays of all distances/angles
|
||||
and compute path loss for all points in one vectorized operation.
|
||||
|
||||
#### 4. `_calculate_bearing` (line 1402) — already vectorizable
|
||||
```python
|
||||
# All np.* functions here have direct CuPy equivalents
|
||||
# Just replace np → xp
|
||||
```
|
||||
|
||||
## Important Rules
|
||||
|
||||
1. **Always get xp at function scope**, not module scope:
|
||||
```python
|
||||
def my_function(self, ...):
|
||||
xp = gpu_manager.get_array_module()
|
||||
# use xp instead of np
|
||||
```
|
||||
|
||||
2. **Convert GPU arrays back to CPU** before returning to non-GPU code:
|
||||
```python
|
||||
if hasattr(result, 'get'): # CuPy array
|
||||
result = result.get() # → numpy array
|
||||
```
|
||||
|
||||
3. **Keep np for small/scalar operations** — GPU overhead isn't worth it for single values.
|
||||
Only use xp for array operations on 100+ elements.
|
||||
|
||||
4. **Don't break the fallback** — if CuPy isn't available, `get_array_module()` returns numpy,
|
||||
so `xp.array()` etc. work identically.
|
||||
|
||||
5. **Test both paths** — run with GPU and verify same results as CPU.
|
||||
|
||||
## Testing
|
||||
|
||||
After changes:
|
||||
```powershell
|
||||
# Rebuild
|
||||
cd D:\root\rfcp\backend
|
||||
pyinstaller ..\installer\rfcp-server-gpu.spec --noconfirm
|
||||
|
||||
# Run
|
||||
.\dist\rfcp-server\rfcp-server.exe
|
||||
|
||||
# Test calculation via frontend — watch Task Manager GPU utilization
|
||||
# Should see GPU Compute spike during coverage calculation
|
||||
# Time should be significantly faster than 10s for 1254 points
|
||||
```
|
||||
|
||||
Compare before/after:
|
||||
- Current (CPU): ~10s for 1254 points, 5km radius
|
||||
- Expected (GPU): 1-3s for same calculation
|
||||
|
||||
Also test GPU diagnostics:
|
||||
```
|
||||
curl http://localhost:8888/api/gpu/diagnostics
|
||||
```
|
||||
|
||||
## What NOT to Change
|
||||
|
||||
- Don't modify gpu_backend.py — it's working correctly
|
||||
- Don't change the API endpoints or response format
|
||||
- Don't remove the NumPy import — keep it for non-array operations
|
||||
- Don't change propagation model math — only the array operations
|
||||
- Don't change _filter_buildings_to_bbox or OSM functions — they use lists not arrays
|
||||
|
||||
## Success Criteria
|
||||
|
||||
- [ ] Coverage calculation uses GPU (visible in Task Manager)
|
||||
- [ ] Calculation time reduced for 1000+ point grids
|
||||
- [ ] CPU fallback still works (test by setting active_backend to cpu via API)
|
||||
- [ ] Same coverage results (heatmap should look identical)
|
||||
- [ ] No regression in tiled processing mode
|
||||
181
docs/devlog/gpu_supp/RFCP-3.8.0-Vectorize-Coverage-Task.md
Normal file
181
docs/devlog/gpu_supp/RFCP-3.8.0-Vectorize-Coverage-Task.md
Normal file
@@ -0,0 +1,181 @@
|
||||
# RFCP 3.8.0 — Vectorize Per-Point Coverage Calculations
|
||||
|
||||
## Context
|
||||
|
||||
Iteration 3.7.0 added GPU precompute for distances + base path loss (Phase 2.5).
|
||||
But Phase 3 (per-point loop) still runs on CPU, one point at a time across workers.
|
||||
This is where 95% of time goes on Full preset (195s for 6,642 points).
|
||||
|
||||
Current pipeline:
|
||||
```
|
||||
Phase 2.5 (GPU, 0.01s): distances + base path_loss → precomputed arrays
|
||||
Phase 3 (CPU, 195s): per-point terrain_loss, building_loss, reflections, vegetation
|
||||
```
|
||||
|
||||
Goal: Vectorize the heavy per-point calculations so GPU handles them in bulk.
|
||||
|
||||
## Architecture
|
||||
|
||||
The key insight: `_calculate_point_sync` (line ~1127) does these steps per point:
|
||||
|
||||
1. **Terrain LOS check** — get elevation profile between site and point, check clearance
|
||||
2. **Diffraction loss** — knife-edge based on Fresnel zone clearance
|
||||
3. **Building obstruction** — find buildings between site and point, calculate penetration loss
|
||||
4. **Materials penalty** — add loss based on building material type
|
||||
5. **Dominant path analysis** — LOS vs reflection vs diffraction
|
||||
6. **Street canyon** — check if point is in urban canyon
|
||||
7. **Reflections** — find reflection paths off buildings (most expensive!)
|
||||
8. **Vegetation loss** — check vegetation between site and point
|
||||
9. **Final RSRP** — tx_power - path_loss - terrain_loss - building_loss - veg_loss + gains
|
||||
|
||||
## Strategy: Vectorize in Stages
|
||||
|
||||
NOT everything can be vectorized equally. Prioritize by time spent:
|
||||
|
||||
### Stage 1: Terrain LOS + Diffraction (HIGH IMPACT)
|
||||
Currently: For each point, sample ~50-100 elevation values along radial path,
|
||||
find min clearance, compute knife-edge diffraction.
|
||||
|
||||
**Vectorize**: Create 2D elevation profiles for ALL points at once.
|
||||
- All points share the same site location
|
||||
- For N points, create N terrain profiles (each M samples)
|
||||
- Compute Fresnel clearance for all profiles vectorized
|
||||
- Compute diffraction loss vectorized
|
||||
|
||||
```python
|
||||
# Instead of per-point:
|
||||
for point in grid:
|
||||
profile = get_terrain_profile(site, point, num_samples=50)
|
||||
clearance = min_clearance(profile)
|
||||
loss = diffraction_loss(clearance, freq)
|
||||
|
||||
# Vectorized:
|
||||
xp = gpu_manager.get_array_module()
|
||||
# all_profiles shape: (N_points, M_samples)
|
||||
all_profiles = get_terrain_profiles_batch(site, all_points, num_samples=50)
|
||||
all_clearances = compute_clearances_batch(all_profiles, site_elev, point_elevs, distances)
|
||||
all_terrain_loss = diffraction_loss_batch(all_clearances, freq)
|
||||
```
|
||||
|
||||
### Stage 2: Building Obstruction (HIGH IMPACT)
|
||||
Currently: For each point, find nearby buildings, check if they obstruct path.
|
||||
|
||||
**Vectorize**: Use spatial indexing but batch the geometry checks.
|
||||
- Pre-compute building bounding boxes as GPU arrays
|
||||
- For each point, ray-building intersection can be done as matrix operation
|
||||
- Building penetration loss is simple lookup after intersection
|
||||
|
||||
NOTE: This is harder to vectorize because each point has different number of
|
||||
nearby buildings. Options:
|
||||
a) Pad to max buildings per point (wastes memory but simple)
|
||||
b) Use sparse representation
|
||||
c) Keep per-point but use GPU for the geometry math
|
||||
|
||||
Recommend option (c) initially — keep the spatial query on CPU but move
|
||||
the trig/geometry calculations to GPU.
|
||||
|
||||
### Stage 3: Reflections (MEDIUM IMPACT, only on Full preset)
|
||||
Currently: For each point with buildings, compute reflection paths.
|
||||
This is the most complex calculation and hardest to vectorize.
|
||||
|
||||
**Approach**: Keep reflections per-point for now, but optimize the inner math
|
||||
with vectorized operations.
|
||||
|
||||
### Stage 4: Vegetation Loss (LOW IMPACT)
|
||||
Simple lookup — not worth GPU overhead.
|
||||
|
||||
## Implementation Plan
|
||||
|
||||
### Step 1: Batch terrain profiling
|
||||
Add to coverage_service.py a new method:
|
||||
```python
|
||||
def _batch_terrain_profiles(self, site_lat, site_lon, site_elev,
|
||||
grid_lats, grid_lons, grid_elevs,
|
||||
distances, frequency, num_samples=50):
|
||||
"""Compute terrain LOS and diffraction loss for all points at once."""
|
||||
xp = gpu_manager.get_array_module()
|
||||
N = len(grid_lats)
|
||||
|
||||
# Interpolate terrain profiles for all points
|
||||
# Each profile: site → point, num_samples elevation values
|
||||
# Use terrain tile data directly
|
||||
|
||||
# Compute Fresnel zone clearance for each profile
|
||||
# Compute knife-edge diffraction loss
|
||||
|
||||
return terrain_losses # shape (N,)
|
||||
```
|
||||
|
||||
### Step 2: Batch building check
|
||||
Add method:
|
||||
```python
|
||||
def _batch_building_obstruction(self, site_lat, site_lon,
|
||||
grid_lats, grid_lons,
|
||||
distances, buildings_spatial_index,
|
||||
all_buildings):
|
||||
"""Compute building loss for all points at once."""
|
||||
# For each point, query spatial index (CPU)
|
||||
# Batch the geometry intersection math (GPU)
|
||||
# Return losses
|
||||
|
||||
return building_losses # shape (N,)
|
||||
```
|
||||
|
||||
### Step 3: Replace _run_point_loop
|
||||
Instead of ProcessPool workers, do:
|
||||
```python
|
||||
# In calculate_coverage, after Phase 2.5:
|
||||
terrain_losses = self._batch_terrain_profiles(...)
|
||||
building_losses = self._batch_building_obstruction(...)
|
||||
|
||||
# Final RSRP is now fully vectorized:
|
||||
rsrp = tx_power - precomputed_path_loss - terrain_losses - building_losses - veg_losses
|
||||
# + antenna_gains + reflection_gains
|
||||
```
|
||||
|
||||
### Step 4: Keep worker fallback
|
||||
If GPU not available or for very complex calculations (reflections),
|
||||
fall back to the existing per-point ProcessPool approach.
|
||||
|
||||
## Important Notes
|
||||
|
||||
1. **GPU code only in main process** — learned from 3.7.0, never import gpu_manager in workers
|
||||
2. **Terrain data access** — terrain tiles are in memory, need efficient sampling for batch profiles
|
||||
3. **CuPy ↔ NumPy bridge** — use `xp.asnumpy()` or `.get()` to convert back to CPU
|
||||
4. **Memory** — 6,642 points × 50 terrain samples = 332,100 floats = 2.5 MB on GPU, no problem
|
||||
5. **Accuracy** — results must match existing per-point calculation within 1 dB
|
||||
|
||||
## Testing
|
||||
|
||||
```powershell
|
||||
cd D:\root\rfcp\backend
|
||||
pyinstaller ..\installer\rfcp-server-gpu.spec --noconfirm
|
||||
.\dist\rfcp-server\rfcp-server.exe
|
||||
```
|
||||
|
||||
Compare Full preset:
|
||||
- Before (3.7.0): ~195s for 6,642 points
|
||||
- Target (3.8.0): <30s for same calculation
|
||||
- Stretch goal: <10s
|
||||
|
||||
Verify accuracy:
|
||||
- Run same location with GPU and CPU backend
|
||||
- Compare RSRP values — should be within 1 dB
|
||||
- Coverage percentages (Excellent/Good/Fair/Weak) should be very close
|
||||
|
||||
## What NOT to Change
|
||||
|
||||
- Don't modify propagation model math (Okumura-Hata, COST-231, Free-Space formulas)
|
||||
- Don't change API endpoints or response format
|
||||
- Don't remove the ProcessPool fallback — keep it for CPU-only mode
|
||||
- Don't change OSM fetching or caching
|
||||
- Don't modify the frontend
|
||||
|
||||
## Success Criteria
|
||||
|
||||
- [ ] Full preset completes in <30s (was 195s)
|
||||
- [ ] Standard preset completes in <5s (was 7.2s)
|
||||
- [ ] No CuPy errors in worker processes
|
||||
- [ ] CPU fallback still works
|
||||
- [ ] Results match within 1 dB accuracy
|
||||
- [ ] GPU utilization visible in Task Manager during calculation
|
||||
436
docs/devlog/gpu_supp/RFCP-3.9.0-SRTM-Terrain-Integration.md
Normal file
436
docs/devlog/gpu_supp/RFCP-3.9.0-SRTM-Terrain-Integration.md
Normal file
@@ -0,0 +1,436 @@
|
||||
# RFCP 3.9.0 — SRTM1 Real Terrain Data Integration
|
||||
|
||||
## Context
|
||||
|
||||
RFCP currently downloads terrain tiles from an elevation API at runtime.
|
||||
This works but has limitations:
|
||||
- Requires internet connection
|
||||
- Unknown data source quality
|
||||
- No offline capability (critical for tactical/field use)
|
||||
- No control over resolution or caching
|
||||
|
||||
Goal: Replace with SRTM1 (30m resolution) HGT files, offline-first architecture.
|
||||
|
||||
## SRTM1 Data Format
|
||||
|
||||
HGT files are dead simple:
|
||||
- 1°×1° tiles, named by southwest corner: `N48E033.hgt`
|
||||
- 3601×3601 grid of signed 16-bit integers (big-endian)
|
||||
- Each value = elevation in meters
|
||||
- File size: exactly 25,934,402 bytes (3601 × 3601 × 2)
|
||||
- Row order: north to south (first row = northernmost)
|
||||
- Column order: west to east
|
||||
- Adjacent tiles overlap by 1 pixel on shared edges
|
||||
- Void/no-data value: -32768
|
||||
|
||||
Compressed (.hgt.zip): ~10-15 MB per tile typically.
|
||||
|
||||
## Architecture
|
||||
|
||||
### Tile Storage Layout
|
||||
|
||||
```
|
||||
{app_data}/terrain/
|
||||
├── srtm1/ # 30m resolution tiles
|
||||
│ ├── N48E033.hgt # Uncompressed for fast access
|
||||
│ ├── N48E034.hgt
|
||||
│ ├── N48E035.hgt
|
||||
│ └── ...
|
||||
├── tile_index.json # Metadata: available tiles, checksums, dates
|
||||
└── downloads/ # Temporary download staging
|
||||
```
|
||||
|
||||
On Windows, `{app_data}` = the application's data directory.
|
||||
For PyInstaller exe: `data/terrain/` relative to exe location.
|
||||
The path must be configurable (environment variable or config file).
|
||||
|
||||
### Tile Manager (new file: `terrain_manager.py`)
|
||||
|
||||
```python
|
||||
class SRTMTileManager:
|
||||
"""Manages SRTM1 HGT tile storage, loading, and caching."""
|
||||
|
||||
def __init__(self, terrain_dir: str):
|
||||
self.terrain_dir = Path(terrain_dir)
|
||||
self.srtm1_dir = self.terrain_dir / "srtm1"
|
||||
self.srtm1_dir.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
# In-memory cache: tile_name -> numpy array
|
||||
self._tile_cache: Dict[str, np.ndarray] = {}
|
||||
self._max_cache_tiles = 16 # ~16 tiles = ~400 MB RAM
|
||||
|
||||
def get_tile_name(self, lat: float, lon: float) -> str:
|
||||
"""Convert lat/lon to SRTM tile name."""
|
||||
# Floor to get southwest corner
|
||||
lat_int = int(lat) if lat >= 0 else int(lat) - 1
|
||||
lon_int = int(lon) if lon >= 0 else int(lon) - 1
|
||||
|
||||
lat_prefix = "N" if lat_int >= 0 else "S"
|
||||
lon_prefix = "E" if lon_int >= 0 else "W"
|
||||
|
||||
return f"{lat_prefix}{abs(lat_int):02d}{lon_prefix}{abs(lon_int):03d}"
|
||||
|
||||
def get_required_tiles(self, center_lat, center_lon, radius_km) -> List[str]:
|
||||
"""Determine which tiles are needed for a coverage calculation."""
|
||||
# Calculate bounding box from center + radius
|
||||
# Return list of tile names
|
||||
|
||||
def has_tile(self, tile_name: str) -> bool:
|
||||
"""Check if tile exists locally."""
|
||||
return (self.srtm1_dir / f"{tile_name}.hgt").exists()
|
||||
|
||||
def load_tile(self, tile_name: str) -> Optional[np.ndarray]:
|
||||
"""Load tile from disk into memory. Returns 3601x3601 int16 array."""
|
||||
if tile_name in self._tile_cache:
|
||||
return self._tile_cache[tile_name]
|
||||
|
||||
hgt_path = self.srtm1_dir / f"{tile_name}.hgt"
|
||||
if not hgt_path.exists():
|
||||
return None
|
||||
|
||||
# Read raw HGT: big-endian signed 16-bit
|
||||
data = np.fromfile(str(hgt_path), dtype='>i2')
|
||||
tile = data.reshape((3601, 3601))
|
||||
|
||||
# Replace void values
|
||||
tile = tile.astype(np.float32)
|
||||
tile[tile == -32768] = np.nan
|
||||
|
||||
# Cache management (LRU-style: evict oldest if full)
|
||||
if len(self._tile_cache) >= self._max_cache_tiles:
|
||||
oldest_key = next(iter(self._tile_cache))
|
||||
del self._tile_cache[oldest_key]
|
||||
|
||||
self._tile_cache[tile_name] = tile
|
||||
return tile
|
||||
|
||||
def get_elevation(self, lat: float, lon: float) -> Optional[float]:
|
||||
"""Get elevation at a single point with bilinear interpolation."""
|
||||
tile_name = self.get_tile_name(lat, lon)
|
||||
tile = self.load_tile(tile_name)
|
||||
if tile is None:
|
||||
return None
|
||||
return self._bilinear_sample(tile, lat, lon)
|
||||
|
||||
def get_elevations_batch(self, lats: np.ndarray, lons: np.ndarray) -> np.ndarray:
|
||||
"""Get elevations for array of points. Vectorized."""
|
||||
# Group points by tile
|
||||
# Load needed tiles
|
||||
# Vectorized bilinear interpolation per tile
|
||||
# Return array of elevations
|
||||
|
||||
async def download_tile(self, tile_name: str) -> bool:
|
||||
"""Download a single tile from remote source (if online)."""
|
||||
# Try multiple sources in order:
|
||||
# 1. Own server (future: UMTC sync endpoint)
|
||||
# 2. srtm.fasma.org (no auth required)
|
||||
# 3. viewfinderpanoramas.org (no auth, void-filled)
|
||||
# Returns True if successful
|
||||
|
||||
def get_missing_tiles(self, center_lat, center_lon, radius_km) -> List[str]:
|
||||
"""Check which needed tiles are not available locally."""
|
||||
required = self.get_required_tiles(center_lat, center_lon, radius_km)
|
||||
return [t for t in required if not self.has_tile(t)]
|
||||
```
|
||||
|
||||
### Bilinear Interpolation (CRITICAL for accuracy)
|
||||
|
||||
Current system uses nearest-neighbor (pick closest grid cell).
|
||||
SRTM1 at 30m means nearest-neighbor can have 15m positional error.
|
||||
Bilinear interpolation reduces this to sub-meter accuracy.
|
||||
|
||||
```python
|
||||
def _bilinear_sample(self, tile: np.ndarray, lat: float, lon: float) -> float:
|
||||
"""Sample elevation with bilinear interpolation."""
|
||||
# Tile southwest corner
|
||||
lat_int = int(lat) if lat >= 0 else int(lat) - 1
|
||||
lon_int = int(lon) if lon >= 0 else int(lon) - 1
|
||||
|
||||
# Fractional position within tile (0.0 to 1.0)
|
||||
lat_frac = lat - lat_int # 0 = south edge, 1 = north edge
|
||||
lon_frac = lon - lon_int # 0 = west edge, 1 = east edge
|
||||
|
||||
# Convert to row/col (note: rows go north to south!)
|
||||
row_exact = (1.0 - lat_frac) * 3600.0 # 0 = north, 3600 = south
|
||||
col_exact = lon_frac * 3600.0 # 0 = west, 3600 = east
|
||||
|
||||
# Four surrounding grid points
|
||||
r0 = int(row_exact)
|
||||
c0 = int(col_exact)
|
||||
r1 = min(r0 + 1, 3600)
|
||||
c1 = min(c0 + 1, 3600)
|
||||
|
||||
# Fractional position between grid points
|
||||
dr = row_exact - r0
|
||||
dc = col_exact - c0
|
||||
|
||||
# Bilinear interpolation
|
||||
z00 = tile[r0, c0]
|
||||
z01 = tile[r0, c1]
|
||||
z10 = tile[r1, c0]
|
||||
z11 = tile[r1, c1]
|
||||
|
||||
# Handle NaN (void) values
|
||||
if np.isnan(z00) or np.isnan(z01) or np.isnan(z10) or np.isnan(z11):
|
||||
# Fall back to nearest non-NaN
|
||||
valid = [(z00, 0, 0), (z01, 0, 1), (z10, 1, 0), (z11, 1, 1)]
|
||||
valid = [(z, r, c) for z, r, c in valid if not np.isnan(z)]
|
||||
return valid[0][0] if valid else 0.0
|
||||
|
||||
elevation = (z00 * (1 - dr) * (1 - dc) +
|
||||
z01 * (1 - dr) * dc +
|
||||
z10 * dr * (1 - dc) +
|
||||
z11 * dr * dc)
|
||||
|
||||
return float(elevation)
|
||||
```
|
||||
|
||||
### Vectorized Batch Elevation (for GPU pipeline)
|
||||
|
||||
This replaces the current `_batch_elevation_lookup` in gpu_service.py.
|
||||
Must handle multi-tile seamlessly.
|
||||
|
||||
```python
|
||||
def get_elevations_batch(self, lats: np.ndarray, lons: np.ndarray) -> np.ndarray:
|
||||
"""Vectorized elevation lookup with bilinear interpolation.
|
||||
|
||||
Handles points spanning multiple tiles efficiently.
|
||||
Groups points by tile, processes each tile with full NumPy vectorization.
|
||||
"""
|
||||
elevations = np.zeros(len(lats), dtype=np.float32)
|
||||
|
||||
# Compute tile indices for each point
|
||||
lat_ints = np.where(lats >= 0, np.floor(lats).astype(int),
|
||||
np.floor(lats).astype(int))
|
||||
lon_ints = np.where(lons >= 0, np.floor(lons).astype(int),
|
||||
np.floor(lons).astype(int))
|
||||
|
||||
# Group by tile
|
||||
tile_keys = lat_ints * 1000 + lon_ints # unique key per tile
|
||||
unique_keys = np.unique(tile_keys)
|
||||
|
||||
for key in unique_keys:
|
||||
mask = tile_keys == key
|
||||
lat_int = int(key // 1000)
|
||||
lon_int = int(key % 1000)
|
||||
if lon_int > 500: # handle negative longitudes
|
||||
lon_int -= 1000
|
||||
|
||||
tile_name = self._make_tile_name(lat_int, lon_int)
|
||||
tile = self.load_tile(tile_name)
|
||||
|
||||
if tile is None:
|
||||
elevations[mask] = 0.0 # no data
|
||||
continue
|
||||
|
||||
# Vectorized bilinear for all points in this tile
|
||||
tile_lats = lats[mask]
|
||||
tile_lons = lons[mask]
|
||||
|
||||
lat_frac = tile_lats - lat_int
|
||||
lon_frac = tile_lons - lon_int
|
||||
|
||||
row_exact = (1.0 - lat_frac) * 3600.0
|
||||
col_exact = lon_frac * 3600.0
|
||||
|
||||
r0 = np.clip(row_exact.astype(int), 0, 3599)
|
||||
c0 = np.clip(col_exact.astype(int), 0, 3599)
|
||||
r1 = np.clip(r0 + 1, 0, 3600)
|
||||
c1 = np.clip(c0 + 1, 0, 3600)
|
||||
|
||||
dr = row_exact - r0
|
||||
dc = col_exact - c0
|
||||
|
||||
z00 = tile[r0, c0]
|
||||
z01 = tile[r0, c1]
|
||||
z10 = tile[r1, c0]
|
||||
z11 = tile[r1, c1]
|
||||
|
||||
result = (z00 * (1 - dr) * (1 - dc) +
|
||||
z01 * (1 - dr) * dc +
|
||||
z10 * dr * (1 - dc) +
|
||||
z11 * dr * dc)
|
||||
|
||||
# Handle NaN voids
|
||||
nan_mask = np.isnan(result)
|
||||
if nan_mask.any():
|
||||
result[nan_mask] = 0.0
|
||||
|
||||
elevations[mask] = result
|
||||
|
||||
return elevations
|
||||
```
|
||||
|
||||
## Integration Points
|
||||
|
||||
### 1. Replace terrain_service.py elevation lookup
|
||||
|
||||
Current terrain service downloads elevation data from an API.
|
||||
Replace with SRTMTileManager calls:
|
||||
|
||||
```python
|
||||
# OLD:
|
||||
elevation = await self.terrain_service.get_elevation(lat, lon)
|
||||
|
||||
# NEW:
|
||||
elevation = self.tile_manager.get_elevation(lat, lon)
|
||||
# Or for batch (GPU pipeline Phase 2.6):
|
||||
elevations = self.tile_manager.get_elevations_batch(lats_array, lons_array)
|
||||
```
|
||||
|
||||
### 2. Replace _batch_elevation_lookup in gpu_service.py
|
||||
|
||||
The vectorized elevation lookup in gpu_service.py currently loads tiles
|
||||
and does nearest-neighbor sampling. Replace with tile_manager.get_elevations_batch()
|
||||
which does bilinear interpolation.
|
||||
|
||||
### 3. Coverage service pre-check
|
||||
|
||||
Before starting calculation, check if all needed tiles are available:
|
||||
|
||||
```python
|
||||
missing = self.tile_manager.get_missing_tiles(site_lat, site_lon, radius_km)
|
||||
if missing:
|
||||
if has_internet:
|
||||
# Try to download missing tiles
|
||||
for tile_name in missing:
|
||||
await self.tile_manager.download_tile(tile_name)
|
||||
else:
|
||||
# Return warning to frontend
|
||||
return {"warning": f"Missing terrain tiles: {missing}. Using flat terrain."}
|
||||
```
|
||||
|
||||
### 4. Frontend notification
|
||||
|
||||
When tiles are missing, show a warning banner:
|
||||
"⚠ Terrain data not available for this area. Coverage accuracy reduced."
|
||||
|
||||
When tiles are being downloaded:
|
||||
"⬇ Downloading terrain data... (N48E033.hgt, 12.5 MB)"
|
||||
|
||||
### 5. Terrain Profile Viewer
|
||||
|
||||
The terrain profile viewer should use the same tile_manager
|
||||
for consistent elevation data. With bilinear interpolation,
|
||||
profiles will be much smoother and more accurate.
|
||||
|
||||
## Download Sources (Priority Order)
|
||||
|
||||
For auto-download when online:
|
||||
|
||||
1. **srtm.fasma.org** (no auth, direct HGT.zip download)
|
||||
URL: `https://srtm.fasma.org/N48E033.SRTMGL1.hgt.zip`
|
||||
- Free, no registration
|
||||
- SRTM1 (30m) data
|
||||
- May be slow or unreliable
|
||||
|
||||
2. **viewfinderpanoramas.org** (no auth, void-filled data)
|
||||
URL: `http://viewfinderpanoramas.org/dem1/{region}/{tile}.hgt.zip`
|
||||
- Free, no registration
|
||||
- Void areas filled from topographic maps
|
||||
- Better quality in mountainous areas
|
||||
- File naming might differ by region
|
||||
|
||||
3. **Future: UMTC sync server**
|
||||
URL: `https://rfcp.{your-domain}/api/terrain/tiles/{tile_name}.hgt`
|
||||
- Self-hosted on your infrastructure
|
||||
- Accessible via WireGuard mesh
|
||||
- Can pre-populate with full Ukraine dataset
|
||||
|
||||
## Offline Bundle Strategy
|
||||
|
||||
For installer / field deployment:
|
||||
|
||||
### Option A: Region packs
|
||||
Pre-package tiles by operational area:
|
||||
- `terrain-dnipro.zip` — 4 tiles around Dnipro area (~100 MB)
|
||||
- `terrain-ukraine-east.zip` — ~50 tiles, eastern Ukraine (~1.2 GB)
|
||||
- `terrain-ukraine-full.zip` — ~171 tiles, all Ukraine (~4.3 GB)
|
||||
|
||||
### Option B: On-demand with cache
|
||||
Ship empty, download tiles as needed on first calculation.
|
||||
Cache permanently. Works well for development/testing.
|
||||
|
||||
### Option C: Live USB bundle
|
||||
For tactical deployment, include full Ukraine terrain data
|
||||
on the live USB alongside the application. 4.3 GB is acceptable
|
||||
for a USB drive.
|
||||
|
||||
Recommend: **Option B for now** (development), **Option C for deployment**.
|
||||
|
||||
## File Changes
|
||||
|
||||
### New Files
|
||||
- `backend/app/services/terrain_manager.py` — SRTMTileManager class
|
||||
|
||||
### Modified Files
|
||||
- `backend/app/services/terrain_service.py` — Replace API calls with tile_manager
|
||||
- `backend/app/services/gpu_service.py` — Replace _batch_elevation_lookup
|
||||
- `backend/app/services/coverage_service.py` — Add missing tile pre-check
|
||||
- `backend/app/main.py` — Initialize tile_manager on startup
|
||||
|
||||
### Config
|
||||
- Add `TERRAIN_DIR` environment variable / config option
|
||||
- Default: `./data/terrain` relative to backend exe
|
||||
|
||||
## Testing
|
||||
|
||||
```powershell
|
||||
# Build and test
|
||||
cd D:\root\rfcp\backend
|
||||
pyinstaller ..\installer\rfcp-server-gpu.spec --noconfirm
|
||||
.\dist\rfcp-server\rfcp-server.exe
|
||||
```
|
||||
|
||||
### Test 1: First run (no tiles cached)
|
||||
- Start app, trigger calculation
|
||||
- Should attempt to download required tile(s)
|
||||
- If online: downloads, caches, calculates
|
||||
- If offline: warning, flat terrain fallback
|
||||
|
||||
### Test 2: Cached tiles
|
||||
- Run same calculation again
|
||||
- Tile loaded from disk cache, no download
|
||||
- Should be fast (tile load from disk < 100ms)
|
||||
|
||||
### Test 3: Accuracy comparison
|
||||
- Compare elevation at known points (e.g., Dnipro city center)
|
||||
- Cross-reference with Google Earth elevation
|
||||
- Expected accuracy: ±5m horizontal, ±16m vertical (SRTM spec)
|
||||
|
||||
### Test 4: Multi-tile calculation
|
||||
- Set radius to 50km+ to span multiple tiles
|
||||
- Verify seamless stitching at tile boundaries
|
||||
- No elevation jumps or artifacts at edges
|
||||
|
||||
### Test 5: Terrain profile
|
||||
- Draw terrain profile across tile boundary
|
||||
- Should be smooth, no discontinuity
|
||||
- Compare with Google Earth profile for same path
|
||||
|
||||
### Test 6: Performance
|
||||
- Tile load time from disk: <100ms
|
||||
- Batch elevation lookup (6000 points): <50ms
|
||||
- Should not regress overall calculation time
|
||||
- Memory: ~25 MB per loaded tile, max 16 tiles = 400 MB
|
||||
|
||||
## What NOT to Change
|
||||
|
||||
- Don't modify GPU pipeline architecture (Phase 2.5/2.6/2.7)
|
||||
- Don't change propagation model math
|
||||
- Don't change API endpoints or response format
|
||||
- Don't change frontend map or heatmap rendering
|
||||
- Don't change OSM building/vegetation fetching
|
||||
- Don't change PyInstaller build process (just add data dir)
|
||||
|
||||
## Success Criteria
|
||||
|
||||
- [ ] SRTM1 tiles load correctly (3601×3601, 30m resolution)
|
||||
- [ ] Bilinear interpolation working (smoother than nearest-neighbor)
|
||||
- [ ] Offline mode works with pre-cached tiles
|
||||
- [ ] Auto-download works when online
|
||||
- [ ] Missing tile warning shown to user
|
||||
- [ ] Multi-tile seamless stitching
|
||||
- [ ] Terrain profile accuracy matches Google Earth within 20m
|
||||
- [ ] No performance regression (calculation time same or faster)
|
||||
- [ ] Tile cache directory configurable
|
||||
246
docs/devlog/gpu_supp/RFCP-3.9.1-Terra-Integration.md
Normal file
246
docs/devlog/gpu_supp/RFCP-3.9.1-Terra-Integration.md
Normal file
@@ -0,0 +1,246 @@
|
||||
# RFCP — Iteration 3.9.1: Terra Tile Server Integration
|
||||
|
||||
## Overview
|
||||
Connect terrain_service.py to our SRTM tile server (terra.eliah.one) as primary download source, add terrain status API endpoint, and create a bulk pre-download utility. The `data/terrain/` directory already exists.
|
||||
|
||||
## Context
|
||||
- terra.eliah.one is live and serving tiles via Caddy file_server
|
||||
- SRTM3 (90m): 187 tiles, 515 MB — full Ukraine coverage (N44-N51, E018-E041)
|
||||
- SRTM1 (30m): 160 tiles, 3.9 GB — same coverage area
|
||||
- terrain_service.py already has bilinear interpolation (3.9.0)
|
||||
- Backend runs on Windows with RTX 4060, tiles stored locally in `data/terrain/`
|
||||
- Server is download source, NOT used during realtime calculations
|
||||
|
||||
## Changes Required
|
||||
|
||||
### 1. Update SRTM_SOURCES in terrain_service.py
|
||||
|
||||
**File:** `backend/app/services/terrain_service.py`
|
||||
|
||||
Replace current SRTM_SOURCES (lines 22-25):
|
||||
```python
|
||||
SRTM_SOURCES = [
|
||||
"https://elevation-tiles-prod.s3.amazonaws.com/skadi/{lat_dir}/{tile_name}.hgt.gz",
|
||||
"https://s3.amazonaws.com/elevation-tiles-prod/skadi/{lat_dir}/{tile_name}.hgt.gz",
|
||||
]
|
||||
```
|
||||
|
||||
With prioritized source list:
|
||||
```python
|
||||
SRTM_SOURCES = [
|
||||
# Our tile server — SRTM1 (30m) preferred, uncompressed
|
||||
{
|
||||
"url": "https://terra.eliah.one/srtm1/{tile_name}.hgt",
|
||||
"compressed": False,
|
||||
"resolution": "srtm1",
|
||||
},
|
||||
# Our tile server — SRTM3 (90m) fallback
|
||||
{
|
||||
"url": "https://terra.eliah.one/srtm3/{tile_name}.hgt",
|
||||
"compressed": False,
|
||||
"resolution": "srtm3",
|
||||
},
|
||||
# Public AWS mirror — SRTM1, gzip compressed
|
||||
{
|
||||
"url": "https://elevation-tiles-prod.s3.amazonaws.com/skadi/{lat_dir}/{tile_name}.hgt.gz",
|
||||
"compressed": True,
|
||||
"resolution": "srtm1",
|
||||
},
|
||||
]
|
||||
```
|
||||
|
||||
Update `download_tile()` to handle the new source format:
|
||||
```python
|
||||
async def download_tile(self, tile_name: str) -> bool:
|
||||
"""Download SRTM tile from configured sources, preferring highest resolution."""
|
||||
tile_path = self.get_tile_path(tile_name)
|
||||
|
||||
if tile_path.exists():
|
||||
return True
|
||||
|
||||
lat_dir = tile_name[:3] # e.g., "N48"
|
||||
|
||||
async with httpx.AsyncClient(timeout=60.0, follow_redirects=True) as client:
|
||||
for source in self.SRTM_SOURCES:
|
||||
url = source["url"].format(lat_dir=lat_dir, tile_name=tile_name)
|
||||
try:
|
||||
response = await client.get(url)
|
||||
|
||||
if response.status_code == 200:
|
||||
data = response.content
|
||||
|
||||
# Skip empty responses
|
||||
if len(data) < 1000:
|
||||
continue
|
||||
|
||||
if source["compressed"]:
|
||||
if url.endswith('.gz'):
|
||||
data = gzip.decompress(data)
|
||||
elif url.endswith('.zip'):
|
||||
with zipfile.ZipFile(io.BytesIO(data)) as zf:
|
||||
for name in zf.namelist():
|
||||
if name.endswith('.hgt'):
|
||||
data = zf.read(name)
|
||||
break
|
||||
|
||||
# Validate tile size
|
||||
if len(data) not in (3601 * 3601 * 2, 1201 * 1201 * 2):
|
||||
print(f"[Terrain] Invalid tile size {len(data)} from {url}")
|
||||
continue
|
||||
|
||||
tile_path.write_bytes(data)
|
||||
res = source["resolution"]
|
||||
size_mb = len(data) / 1048576
|
||||
print(f"[Terrain] Downloaded {tile_name} ({res}, {size_mb:.1f} MB)")
|
||||
return True
|
||||
|
||||
except Exception as e:
|
||||
print(f"[Terrain] Failed from {url}: {e}")
|
||||
continue
|
||||
|
||||
print(f"[Terrain] Could not download {tile_name} from any source")
|
||||
return False
|
||||
```
|
||||
|
||||
### 2. Add Terrain Status API Endpoint
|
||||
|
||||
**File:** `backend/app/api/routes.py` (or wherever API routes are defined)
|
||||
|
||||
Add a new endpoint:
|
||||
```python
|
||||
@router.get("/api/terrain/status")
|
||||
async def terrain_status():
|
||||
"""Return terrain data availability info."""
|
||||
from app.services.terrain_service import terrain_service
|
||||
|
||||
cached_tiles = terrain_service.get_cached_tiles()
|
||||
cache_size = terrain_service.get_cache_size_mb()
|
||||
|
||||
# Categorize by resolution
|
||||
srtm1_tiles = [t for t in cached_tiles
|
||||
if (terrain_service.terrain_path / f"{t}.hgt").stat().st_size == 3601 * 3601 * 2]
|
||||
srtm3_tiles = [t for t in cached_tiles if t not in srtm1_tiles]
|
||||
|
||||
return {
|
||||
"total_tiles": len(cached_tiles),
|
||||
"srtm1": {
|
||||
"count": len(srtm1_tiles),
|
||||
"resolution_m": 30,
|
||||
"tiles": sorted(srtm1_tiles),
|
||||
},
|
||||
"srtm3": {
|
||||
"count": len(srtm3_tiles),
|
||||
"resolution_m": 90,
|
||||
"tiles": sorted(srtm3_tiles),
|
||||
},
|
||||
"cache_size_mb": round(cache_size, 1),
|
||||
"memory_cached": len(terrain_service._tile_cache),
|
||||
"terra_server": "https://terra.eliah.one",
|
||||
}
|
||||
```
|
||||
|
||||
### 3. Add Bulk Pre-Download Endpoint
|
||||
|
||||
**File:** Same routes file
|
||||
|
||||
```python
|
||||
@router.post("/api/terrain/download")
|
||||
async def terrain_download(request: dict):
|
||||
"""Pre-download tiles for a region.
|
||||
|
||||
Body: {"center_lat": 48.46, "center_lon": 35.04, "radius_km": 50}
|
||||
Or: {"tiles": ["N48E034", "N48E035", "N47E034", "N47E035"]}
|
||||
"""
|
||||
from app.services.terrain_service import terrain_service
|
||||
|
||||
if "tiles" in request:
|
||||
tile_list = request["tiles"]
|
||||
else:
|
||||
center_lat = request.get("center_lat", 48.46)
|
||||
center_lon = request.get("center_lon", 35.04)
|
||||
radius_km = request.get("radius_km", 50)
|
||||
tile_list = terrain_service.get_required_tiles(center_lat, center_lon, radius_km)
|
||||
|
||||
missing = [t for t in tile_list if not terrain_service.get_tile_path(t).exists()]
|
||||
|
||||
if not missing:
|
||||
return {"status": "ok", "message": "All tiles already cached", "count": len(tile_list)}
|
||||
|
||||
# Download missing tiles
|
||||
downloaded = []
|
||||
failed = []
|
||||
for tile_name in missing:
|
||||
success = await terrain_service.download_tile(tile_name)
|
||||
if success:
|
||||
downloaded.append(tile_name)
|
||||
else:
|
||||
failed.append(tile_name)
|
||||
|
||||
return {
|
||||
"status": "ok",
|
||||
"required": len(tile_list),
|
||||
"already_cached": len(tile_list) - len(missing),
|
||||
"downloaded": downloaded,
|
||||
"failed": failed,
|
||||
}
|
||||
```
|
||||
|
||||
### 4. Add Tile Index Endpoint
|
||||
|
||||
**File:** Same routes file
|
||||
|
||||
```python
|
||||
@router.get("/api/terrain/index")
|
||||
async def terrain_index():
|
||||
"""Fetch tile index from terra server."""
|
||||
import httpx
|
||||
try:
|
||||
async with httpx.AsyncClient(timeout=10.0) as client:
|
||||
resp = await client.get("https://terra.eliah.one/api/index")
|
||||
if resp.status_code == 200:
|
||||
return resp.json()
|
||||
except Exception:
|
||||
pass
|
||||
return {"error": "Could not reach terra.eliah.one", "offline": True}
|
||||
```
|
||||
|
||||
## Testing Checklist
|
||||
|
||||
- [ ] `GET /api/terrain/status` returns tile counts and sizes
|
||||
- [ ] `POST /api/terrain/download {"center_lat": 48.46, "center_lon": 35.04, "radius_km": 10}` downloads missing tiles from terra.eliah.one
|
||||
- [ ] Tiles downloaded from terra are valid HGT format (2,884,802 or 25,934,402 bytes)
|
||||
- [ ] SRTM1 is preferred over SRTM3 when downloading
|
||||
- [ ] Existing tiles are not re-downloaded
|
||||
- [ ] Coverage calculation works with terrain data (test with Dnipro coordinates)
|
||||
- [ ] `GET /api/terrain/index` returns terra server tile list
|
||||
|
||||
## Build & Deploy
|
||||
|
||||
```bash
|
||||
cd D:\root\rfcp\backend
|
||||
# No build needed — Python backend, just restart
|
||||
# Kill existing uvicorn and restart:
|
||||
python -m uvicorn app.main:app --host 0.0.0.0 --port 8000 --reload
|
||||
```
|
||||
|
||||
## Commit Message
|
||||
|
||||
```
|
||||
feat(terrain): integrate terra.eliah.one tile server
|
||||
|
||||
- Add terra.eliah.one as primary SRTM source (SRTM1 30m preferred)
|
||||
- Keep AWS S3 as fallback source
|
||||
- Add /api/terrain/status endpoint (tile inventory)
|
||||
- Add /api/terrain/download endpoint (bulk pre-download)
|
||||
- Add /api/terrain/index endpoint (terra server index)
|
||||
- Validate tile size before saving
|
||||
- Add follow_redirects=True to httpx client
|
||||
```
|
||||
|
||||
## Success Criteria
|
||||
|
||||
1. terrain_service downloads from terra.eliah.one first
|
||||
2. /api/terrain/status shows correct tile counts by resolution
|
||||
3. /api/terrain/download fetches tiles for any Ukrainian coordinate
|
||||
4. Offline mode works — no downloads attempted if tiles exist locally
|
||||
5. Coverage calculation uses real elevation data instead of flat terrain
|
||||
656
docs/devlog/gpu_supp/RFCP-Dependencies-Installer.md
Normal file
656
docs/devlog/gpu_supp/RFCP-Dependencies-Installer.md
Normal file
@@ -0,0 +1,656 @@
|
||||
# RFCP Dependencies & Installer Specification
|
||||
|
||||
## Overview
|
||||
|
||||
All dependencies needed for RFCP to work out of the box, including GPU acceleration.
|
||||
The installer must handle everything — user should NOT need to run pip manually.
|
||||
|
||||
---
|
||||
|
||||
## Python Dependencies
|
||||
|
||||
### Core (MUST have)
|
||||
|
||||
```txt
|
||||
# requirements.txt
|
||||
|
||||
# Web framework
|
||||
fastapi>=0.104.0
|
||||
uvicorn[standard]>=0.24.0
|
||||
websockets>=12.0
|
||||
|
||||
# Scientific computing
|
||||
numpy>=1.24.0
|
||||
scipy>=1.11.0
|
||||
|
||||
# Geospatial
|
||||
pyproj>=3.6.0 # coordinate transformations
|
||||
shapely>=2.0.0 # geometry operations (boundary contours)
|
||||
|
||||
# Terrain data
|
||||
rasterio>=1.3.0 # GeoTIFF reading (optional, for custom terrain)
|
||||
# Note: SRTM .hgt files read with numpy directly
|
||||
|
||||
# OSM data
|
||||
requests>=2.31.0 # HTTP client for OSM Overpass API
|
||||
geopy>=2.4.0 # distance calculations
|
||||
|
||||
# Database
|
||||
# sqlite3 is built-in Python — no install needed
|
||||
|
||||
# Utilities
|
||||
orjson>=3.9.0 # fast JSON (optional, faster API responses)
|
||||
pydantic>=2.0.0 # data validation (FastAPI dependency)
|
||||
```
|
||||
|
||||
### GPU Acceleration (OPTIONAL — auto-detected)
|
||||
|
||||
```txt
|
||||
# requirements-gpu-nvidia.txt
|
||||
cupy-cuda12x>=12.0.0 # For CUDA 12.x (RTX 30xx, 40xx)
|
||||
# OR
|
||||
cupy-cuda11x>=11.0.0 # For CUDA 11.x (older cards)
|
||||
|
||||
# requirements-gpu-opencl.txt
|
||||
pyopencl>=2023.1 # For ANY GPU (Intel, AMD, NVIDIA)
|
||||
```
|
||||
|
||||
### Development / Testing
|
||||
|
||||
```txt
|
||||
# requirements-dev.txt
|
||||
pytest>=7.0.0
|
||||
pytest-asyncio>=0.21.0
|
||||
httpx>=0.25.0 # async test client
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## System Dependencies
|
||||
|
||||
### NVIDIA GPU Support
|
||||
|
||||
```
|
||||
REQUIRED: NVIDIA Driver (comes with GPU)
|
||||
REQUIRED: CUDA Toolkit 12.x (for CuPy)
|
||||
|
||||
Check if installed:
|
||||
nvidia-smi → shows driver version
|
||||
nvcc --version → shows CUDA toolkit version
|
||||
|
||||
If missing CUDA toolkit:
|
||||
Download from: https://developer.nvidia.com/cuda-downloads
|
||||
Select: Windows > x86_64 > 11/10 > exe (local)
|
||||
Size: ~3 GB
|
||||
|
||||
Alternative: cupy auto-installs CUDA runtime!
|
||||
pip install cupy-cuda12x
|
||||
This bundles CUDA runtime (~700 MB) — no separate install needed
|
||||
```
|
||||
|
||||
### Intel GPU Support (OpenCL)
|
||||
|
||||
```
|
||||
REQUIRED: Intel GPU Driver (usually pre-installed)
|
||||
REQUIRED: Intel OpenCL Runtime
|
||||
|
||||
Check if installed:
|
||||
Open Device Manager → Display Adapters → Intel UHD/Iris
|
||||
|
||||
For OpenCL:
|
||||
Download Intel GPU Computing Runtime:
|
||||
https://github.com/intel/compute-runtime/releases
|
||||
|
||||
Or: Intel oneAPI Base Toolkit (includes OpenCL)
|
||||
https://www.intel.com/content/www/us/en/developer/tools/oneapi/base-toolkit-download.html
|
||||
```
|
||||
|
||||
### AMD GPU Support (OpenCL)
|
||||
|
||||
```
|
||||
REQUIRED: AMD Adrenalin Driver (includes OpenCL)
|
||||
Download from: https://www.amd.com/en/support
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Node.js / Frontend Dependencies
|
||||
|
||||
### System Requirements
|
||||
|
||||
```
|
||||
Node.js >= 18.0.0 (LTS recommended)
|
||||
npm >= 9.0.0
|
||||
|
||||
Check:
|
||||
node --version
|
||||
npm --version
|
||||
```
|
||||
|
||||
### Frontend packages (managed by npm)
|
||||
|
||||
```json
|
||||
// package.json — key dependencies
|
||||
{
|
||||
"dependencies": {
|
||||
"react": "^18.2.0",
|
||||
"react-dom": "^18.2.0",
|
||||
"leaflet": "^1.9.4",
|
||||
"react-leaflet": "^4.2.0",
|
||||
"recharts": "^2.8.0",
|
||||
"zustand": "^4.4.0",
|
||||
"lucide-react": "^0.294.0"
|
||||
},
|
||||
"devDependencies": {
|
||||
"vite": "^5.0.0",
|
||||
"typescript": "^5.3.0",
|
||||
"tailwindcss": "^3.4.0",
|
||||
"@types/leaflet": "^1.9.0"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Installer Script
|
||||
|
||||
### Windows Installer (NSIS or Electron-Builder)
|
||||
|
||||
```python
|
||||
# install_rfcp.py — Python-based installer/setup script
|
||||
|
||||
import subprocess
|
||||
import sys
|
||||
import platform
|
||||
import os
|
||||
import shutil
|
||||
import json
|
||||
|
||||
def check_python():
|
||||
"""Verify Python 3.10+ is available."""
|
||||
version = sys.version_info
|
||||
if version.major < 3 or version.minor < 10:
|
||||
print(f"❌ Python 3.10+ required, found {version.major}.{version.minor}")
|
||||
return False
|
||||
print(f"✅ Python {version.major}.{version.minor}.{version.micro}")
|
||||
return True
|
||||
|
||||
def check_node():
|
||||
"""Verify Node.js 18+ is available."""
|
||||
try:
|
||||
result = subprocess.run(["node", "--version"], capture_output=True, text=True)
|
||||
version = result.stdout.strip().lstrip('v')
|
||||
major = int(version.split('.')[0])
|
||||
if major < 18:
|
||||
print(f"❌ Node.js 18+ required, found {version}")
|
||||
return False
|
||||
print(f"✅ Node.js {version}")
|
||||
return True
|
||||
except FileNotFoundError:
|
||||
print("❌ Node.js not found")
|
||||
return False
|
||||
|
||||
def detect_gpu():
|
||||
"""Detect available GPU hardware."""
|
||||
gpus = {
|
||||
"nvidia": False,
|
||||
"nvidia_name": "",
|
||||
"intel": False,
|
||||
"intel_name": "",
|
||||
"amd": False,
|
||||
"amd_name": ""
|
||||
}
|
||||
|
||||
# Check NVIDIA
|
||||
try:
|
||||
result = subprocess.run(
|
||||
["nvidia-smi", "--query-gpu=name,driver_version,memory.total",
|
||||
"--format=csv,noheader"],
|
||||
capture_output=True, text=True, timeout=5
|
||||
)
|
||||
if result.returncode == 0:
|
||||
info = result.stdout.strip()
|
||||
gpus["nvidia"] = True
|
||||
gpus["nvidia_name"] = info.split(",")[0].strip()
|
||||
print(f"✅ NVIDIA GPU: {info}")
|
||||
except (FileNotFoundError, subprocess.TimeoutExpired):
|
||||
print("ℹ️ No NVIDIA GPU detected")
|
||||
|
||||
# Check Intel/AMD via WMI (Windows)
|
||||
if platform.system() == "Windows":
|
||||
try:
|
||||
result = subprocess.run(
|
||||
["wmic", "path", "win32_videocontroller", "get",
|
||||
"name,adapterram,driverversion", "/format:csv"],
|
||||
capture_output=True, text=True, timeout=5
|
||||
)
|
||||
for line in result.stdout.strip().split('\n'):
|
||||
if 'Intel' in line:
|
||||
gpus["intel"] = True
|
||||
gpus["intel_name"] = [x for x in line.split(',') if 'Intel' in x][0]
|
||||
print(f"✅ Intel GPU: {gpus['intel_name']}")
|
||||
elif 'AMD' in line or 'Radeon' in line:
|
||||
gpus["amd"] = True
|
||||
gpus["amd_name"] = [x for x in line.split(',') if 'AMD' in x or 'Radeon' in x][0]
|
||||
print(f"✅ AMD GPU: {gpus['amd_name']}")
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
return gpus
|
||||
|
||||
def install_core_dependencies():
|
||||
"""Install core Python dependencies."""
|
||||
print("\n📦 Installing core dependencies...")
|
||||
subprocess.run([
|
||||
sys.executable, "-m", "pip", "install", "-r", "requirements.txt",
|
||||
"--quiet", "--no-warn-script-location"
|
||||
], check=True)
|
||||
print("✅ Core dependencies installed")
|
||||
|
||||
def install_gpu_dependencies(gpus: dict):
|
||||
"""Install GPU-specific dependencies based on detected hardware."""
|
||||
print("\n🎮 Setting up GPU acceleration...")
|
||||
|
||||
gpu_installed = False
|
||||
|
||||
# NVIDIA — install CuPy (includes CUDA runtime)
|
||||
if gpus["nvidia"]:
|
||||
print(f" Installing CuPy for {gpus['nvidia_name']}...")
|
||||
try:
|
||||
# Try CUDA 12 first (newer cards)
|
||||
subprocess.run([
|
||||
sys.executable, "-m", "pip", "install", "cupy-cuda12x",
|
||||
"--quiet", "--no-warn-script-location"
|
||||
], check=True, timeout=300)
|
||||
print(f" ✅ CuPy (CUDA 12) installed for {gpus['nvidia_name']}")
|
||||
gpu_installed = True
|
||||
except (subprocess.CalledProcessError, subprocess.TimeoutExpired):
|
||||
try:
|
||||
# Fallback to CUDA 11
|
||||
subprocess.run([
|
||||
sys.executable, "-m", "pip", "install", "cupy-cuda11x",
|
||||
"--quiet", "--no-warn-script-location"
|
||||
], check=True, timeout=300)
|
||||
print(f" ✅ CuPy (CUDA 11) installed for {gpus['nvidia_name']}")
|
||||
gpu_installed = True
|
||||
except Exception as e:
|
||||
print(f" ⚠️ CuPy installation failed: {e}")
|
||||
print(f" 💡 Manual install: pip install cupy-cuda12x")
|
||||
|
||||
# Intel/AMD — install PyOpenCL
|
||||
if gpus["intel"] or gpus["amd"]:
|
||||
gpu_name = gpus["intel_name"] or gpus["amd_name"]
|
||||
print(f" Installing PyOpenCL for {gpu_name}...")
|
||||
try:
|
||||
subprocess.run([
|
||||
sys.executable, "-m", "pip", "install", "pyopencl",
|
||||
"--quiet", "--no-warn-script-location"
|
||||
], check=True, timeout=120)
|
||||
print(f" ✅ PyOpenCL installed for {gpu_name}")
|
||||
gpu_installed = True
|
||||
except Exception as e:
|
||||
print(f" ⚠️ PyOpenCL installation failed: {e}")
|
||||
print(f" 💡 Manual install: pip install pyopencl")
|
||||
|
||||
if not gpu_installed:
|
||||
print(" ℹ️ No GPU acceleration available — using CPU (NumPy)")
|
||||
print(" 💡 This is fine! GPU just makes large calculations faster.")
|
||||
|
||||
return gpu_installed
|
||||
|
||||
def install_frontend():
|
||||
"""Install frontend dependencies and build."""
|
||||
print("\n🌐 Setting up frontend...")
|
||||
frontend_dir = os.path.join(os.path.dirname(__file__), "frontend")
|
||||
|
||||
if os.path.exists(os.path.join(frontend_dir, "package.json")):
|
||||
subprocess.run(["npm", "install"], cwd=frontend_dir, check=True)
|
||||
subprocess.run(["npm", "run", "build"], cwd=frontend_dir, check=True)
|
||||
print("✅ Frontend built")
|
||||
else:
|
||||
print("⚠️ Frontend directory not found")
|
||||
|
||||
def download_terrain_data():
|
||||
"""Pre-download SRTM terrain tiles for Ukraine."""
|
||||
print("\n🏔️ Checking terrain data...")
|
||||
cache_dir = os.path.expanduser("~/.rfcp/terrain")
|
||||
os.makedirs(cache_dir, exist_ok=True)
|
||||
|
||||
# Ukraine bounding box: lat 44-53, lon 22-41
|
||||
# SRTM tiles needed for typical use
|
||||
required_tiles = [
|
||||
# Lviv oblast area (common test area)
|
||||
"N49E025", "N49E024", "N49E026",
|
||||
"N50E025", "N50E024", "N50E026",
|
||||
# Dnipro area
|
||||
"N48E034", "N48E035",
|
||||
"N49E034", "N49E035",
|
||||
]
|
||||
|
||||
existing = [f.replace(".hgt", "") for f in os.listdir(cache_dir) if f.endswith(".hgt")]
|
||||
missing = [t for t in required_tiles if t not in existing]
|
||||
|
||||
if missing:
|
||||
print(f" {len(missing)} terrain tiles needed (auto-download on first use)")
|
||||
else:
|
||||
print(f" ✅ {len(existing)} terrain tiles cached")
|
||||
|
||||
def create_launcher():
|
||||
"""Create desktop shortcut / launcher script."""
|
||||
print("\n🚀 Creating launcher...")
|
||||
|
||||
if platform.system() == "Windows":
|
||||
# Create .bat launcher
|
||||
launcher = os.path.join(os.path.dirname(__file__), "RFCP.bat")
|
||||
with open(launcher, 'w') as f:
|
||||
f.write('@echo off\n')
|
||||
f.write('title RFCP - RF Coverage Planner\n')
|
||||
f.write('echo Starting RFCP...\n')
|
||||
f.write(f'cd /d "{os.path.dirname(__file__)}"\n')
|
||||
f.write(f'"{sys.executable}" -m uvicorn backend.app.main:app --host 0.0.0.0 --port 8888\n')
|
||||
print(f" ✅ Launcher created: {launcher}")
|
||||
|
||||
return True
|
||||
|
||||
def verify_installation():
|
||||
"""Run quick verification tests."""
|
||||
print("\n🔍 Verifying installation...")
|
||||
|
||||
checks = []
|
||||
|
||||
# Check core imports
|
||||
try:
|
||||
import numpy as np
|
||||
checks.append(f"✅ NumPy {np.__version__}")
|
||||
except ImportError:
|
||||
checks.append("❌ NumPy missing")
|
||||
|
||||
try:
|
||||
import scipy
|
||||
checks.append(f"✅ SciPy {scipy.__version__}")
|
||||
except ImportError:
|
||||
checks.append("❌ SciPy missing")
|
||||
|
||||
try:
|
||||
import fastapi
|
||||
checks.append(f"✅ FastAPI {fastapi.__version__}")
|
||||
except ImportError:
|
||||
checks.append("❌ FastAPI missing")
|
||||
|
||||
try:
|
||||
import shapely
|
||||
checks.append(f"✅ Shapely {shapely.__version__}")
|
||||
except ImportError:
|
||||
checks.append("⚠️ Shapely missing (boundary features disabled)")
|
||||
|
||||
# Check GPU
|
||||
try:
|
||||
import cupy as cp
|
||||
device = cp.cuda.Device(0)
|
||||
checks.append(f"✅ CuPy → {device.name} ({device.mem_info[1]//1024//1024} MB)")
|
||||
except ImportError:
|
||||
checks.append("ℹ️ CuPy not available")
|
||||
except Exception as e:
|
||||
checks.append(f"⚠️ CuPy error: {e}")
|
||||
|
||||
try:
|
||||
import pyopencl as cl
|
||||
devices = []
|
||||
for p in cl.get_platforms():
|
||||
for d in p.get_devices():
|
||||
devices.append(d.name)
|
||||
checks.append(f"✅ PyOpenCL → {', '.join(devices)}")
|
||||
except ImportError:
|
||||
checks.append("ℹ️ PyOpenCL not available")
|
||||
except Exception as e:
|
||||
checks.append(f"⚠️ PyOpenCL error: {e}")
|
||||
|
||||
for check in checks:
|
||||
print(f" {check}")
|
||||
|
||||
return all("❌" not in c for c in checks)
|
||||
|
||||
def main():
|
||||
"""Main installer entry point."""
|
||||
print("=" * 60)
|
||||
print(" RFCP — RF Coverage Planner — Installer")
|
||||
print("=" * 60)
|
||||
print()
|
||||
|
||||
# Step 1: Check prerequisites
|
||||
print("📋 Checking prerequisites...")
|
||||
if not check_python():
|
||||
sys.exit(1)
|
||||
check_node()
|
||||
|
||||
# Step 2: Detect GPU
|
||||
gpus = detect_gpu()
|
||||
|
||||
# Step 3: Install dependencies
|
||||
install_core_dependencies()
|
||||
install_gpu_dependencies(gpus)
|
||||
|
||||
# Step 4: Frontend
|
||||
install_frontend()
|
||||
|
||||
# Step 5: Terrain data
|
||||
download_terrain_data()
|
||||
|
||||
# Step 6: Launcher
|
||||
create_launcher()
|
||||
|
||||
# Step 7: Verify
|
||||
print()
|
||||
success = verify_installation()
|
||||
|
||||
# Summary
|
||||
print()
|
||||
print("=" * 60)
|
||||
if success:
|
||||
print(" ✅ RFCP installed successfully!")
|
||||
print()
|
||||
print(" To start RFCP:")
|
||||
print(" python -m uvicorn backend.app.main:app --port 8888")
|
||||
print(" Then open: http://localhost:8888")
|
||||
print()
|
||||
if gpus["nvidia"]:
|
||||
print(f" 🎮 GPU: {gpus['nvidia_name']} (CUDA)")
|
||||
elif gpus["intel"] or gpus["amd"]:
|
||||
gpu_name = gpus["intel_name"] or gpus["amd_name"]
|
||||
print(f" 🎮 GPU: {gpu_name} (OpenCL)")
|
||||
else:
|
||||
print(" 💻 Mode: CPU only")
|
||||
else:
|
||||
print(" ⚠️ Installation completed with warnings")
|
||||
print(" Some features may be limited")
|
||||
print("=" * 60)
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Electron-Builder / NSIS Packaging
|
||||
|
||||
### For .exe Installer
|
||||
|
||||
```yaml
|
||||
# electron-builder.yml
|
||||
|
||||
appId: com.rfcp.coverage-planner
|
||||
productName: "RFCP - RF Coverage Planner"
|
||||
copyright: "RFCP 2026"
|
||||
|
||||
directories:
|
||||
output: dist
|
||||
buildResources: build
|
||||
|
||||
files:
|
||||
- "backend/**/*"
|
||||
- "frontend/dist/**/*"
|
||||
- "requirements.txt"
|
||||
- "install_rfcp.py"
|
||||
- "!**/*.pyc"
|
||||
- "!**/node_modules/**"
|
||||
- "!**/venv/**"
|
||||
|
||||
extraResources:
|
||||
- from: "python-embedded/"
|
||||
to: "python/"
|
||||
- from: "terrain-data/"
|
||||
to: "terrain/"
|
||||
|
||||
win:
|
||||
target:
|
||||
- target: nsis
|
||||
arch: [x64]
|
||||
icon: "build/icon.ico"
|
||||
|
||||
nsis:
|
||||
oneClick: false
|
||||
allowToChangeInstallationDirectory: true
|
||||
installerIcon: "build/icon.ico"
|
||||
license: "LICENSE.md"
|
||||
|
||||
# Custom NSIS script for GPU detection
|
||||
include: "build/gpu-detect.nsh"
|
||||
|
||||
# Install steps:
|
||||
# 1. Extract files
|
||||
# 2. Run install_rfcp.py (detects GPU, installs deps)
|
||||
# 3. Create Start Menu shortcuts
|
||||
# 4. Create Desktop shortcut
|
||||
```
|
||||
|
||||
### Portable Version (.zip)
|
||||
|
||||
```
|
||||
RFCP-Portable/
|
||||
├── RFCP.bat # Main launcher
|
||||
├── install.bat # First-time setup
|
||||
├── backend/
|
||||
│ ├── app/
|
||||
│ │ ├── main.py
|
||||
│ │ ├── api/
|
||||
│ │ ├── services/
|
||||
│ │ └── models/
|
||||
│ └── requirements.txt
|
||||
├── frontend/
|
||||
│ └── dist/ # Pre-built frontend
|
||||
├── python/ # Embedded Python (optional)
|
||||
│ ├── python.exe
|
||||
│ └── Lib/
|
||||
├── terrain/ # Pre-cached .hgt files
|
||||
│ ├── N49E025.hgt
|
||||
│ └── ...
|
||||
├── data/
|
||||
│ ├── osm_cache.db # SQLite cache (created on first run)
|
||||
│ └── config.json # User settings
|
||||
└── README.md
|
||||
```
|
||||
|
||||
### install.bat (First-Time Setup)
|
||||
|
||||
```batch
|
||||
@echo off
|
||||
title RFCP - First Time Setup
|
||||
echo ============================================
|
||||
echo RFCP - RF Coverage Planner - Setup
|
||||
echo ============================================
|
||||
echo.
|
||||
|
||||
REM Check if Python exists
|
||||
python --version >nul 2>&1
|
||||
if errorlevel 1 (
|
||||
echo ERROR: Python not found!
|
||||
echo Please install Python 3.10+ from python.org
|
||||
pause
|
||||
exit /b 1
|
||||
)
|
||||
|
||||
REM Run installer
|
||||
python install_rfcp.py
|
||||
|
||||
echo.
|
||||
echo Setup complete! Run RFCP.bat to start.
|
||||
pause
|
||||
```
|
||||
|
||||
### RFCP.bat (Launcher)
|
||||
|
||||
```batch
|
||||
@echo off
|
||||
title RFCP - RF Coverage Planner
|
||||
cd /d "%~dp0"
|
||||
|
||||
REM Check if installed
|
||||
if not exist "backend\app\main.py" (
|
||||
echo ERROR: RFCP not found. Run install.bat first.
|
||||
pause
|
||||
exit /b 1
|
||||
)
|
||||
|
||||
echo Starting RFCP...
|
||||
echo Open http://localhost:8888 in your browser
|
||||
echo Press Ctrl+C to stop
|
||||
echo.
|
||||
|
||||
python -m uvicorn backend.app.main:app --host 0.0.0.0 --port 8888
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Dependency Size Estimates
|
||||
|
||||
| Component | Size |
|
||||
|-----------|------|
|
||||
| Python (embedded) | ~30 MB |
|
||||
| Core pip packages | ~80 MB |
|
||||
| CuPy + CUDA runtime | ~700 MB |
|
||||
| PyOpenCL | ~15 MB |
|
||||
| Frontend (built) | ~5 MB |
|
||||
| SRTM terrain (Ukraine) | ~300 MB |
|
||||
| **Total (with CUDA)** | **~1.1 GB** |
|
||||
| **Total (CPU only)** | **~415 MB** |
|
||||
|
||||
---
|
||||
|
||||
## Runtime Requirements
|
||||
|
||||
| Resource | Minimum | Recommended |
|
||||
|----------|---------|-------------|
|
||||
| RAM | 4 GB | 8+ GB |
|
||||
| Disk | 500 MB | 2 GB (with terrain cache) |
|
||||
| CPU | 4 cores | 8+ cores |
|
||||
| GPU | - | NVIDIA GTX 1060+ / Intel UHD 630+ |
|
||||
| OS | Windows 10 | Windows 10/11 64-bit |
|
||||
| Python | 3.10 | 3.11+ |
|
||||
| Node.js | 18 | 20 LTS |
|
||||
|
||||
---
|
||||
|
||||
## Auto-Update Mechanism (Future)
|
||||
|
||||
```python
|
||||
# Check for updates on startup
|
||||
async def check_for_updates():
|
||||
try:
|
||||
response = await httpx.get(
|
||||
"https://api.github.com/repos/user/rfcp/releases/latest",
|
||||
timeout=5
|
||||
)
|
||||
latest = response.json()["tag_name"]
|
||||
current = get_current_version()
|
||||
|
||||
if latest != current:
|
||||
return {
|
||||
"update_available": True,
|
||||
"current": current,
|
||||
"latest": latest,
|
||||
"download_url": response.json()["assets"][0]["browser_download_url"]
|
||||
}
|
||||
except:
|
||||
pass
|
||||
return {"update_available": False}
|
||||
```
|
||||
@@ -0,0 +1,516 @@
|
||||
# RFCP — Iteration 3.10.5: WebGL Smooth Coverage Interpolation
|
||||
|
||||
**Date:** February 6, 2026
|
||||
**Priority:** P1 (Major Visual Improvement)
|
||||
**Estimated Time:** 3-4 hours
|
||||
**Author:** Claude (Opus 4.5) for Олег @ UMTC
|
||||
|
||||
---
|
||||
|
||||
## Overview
|
||||
|
||||
Replace the current grid-based square coverage visualization with smooth WebGL-interpolated rendering. Currently coverage is displayed as discrete colored squares which looks "pixelated" and unrealistic. Professional RF tools like CloudRF use smooth gradients that interpolate between measurement points.
|
||||
|
||||
**Current State:** Grid squares at 50m/200m resolution → blocky appearance
|
||||
**Target State:** Smooth bilinear/bicubic interpolation → professional gradient appearance
|
||||
|
||||
---
|
||||
|
||||
## Problem Description
|
||||
|
||||
### Current Implementation
|
||||
- Coverage points are rendered as discrete squares on a Leaflet canvas layer
|
||||
- Each grid point (lat, lon, rsrp) → one colored square
|
||||
- Resolution determines square size (50m = small squares, 200m = large squares)
|
||||
- Result: Looks like Minecraft, not like professional RF planning software
|
||||
|
||||
### Desired Outcome
|
||||
- Smooth color transitions between coverage points
|
||||
- GPU-accelerated rendering via WebGL
|
||||
- No visible grid artifacts
|
||||
- Performance maintained or improved (GPU does interpolation)
|
||||
- Same data, better visualization
|
||||
|
||||
---
|
||||
|
||||
## Technical Approach
|
||||
|
||||
### Option A: WebGL Fragment Shader (RECOMMENDED)
|
||||
|
||||
Use a WebGL fragment shader that:
|
||||
1. Receives coverage points as a texture or uniform array
|
||||
2. For each screen pixel, finds nearest coverage points
|
||||
3. Performs bilinear interpolation between them
|
||||
4. Outputs smoothly interpolated color
|
||||
|
||||
**Pros:**
|
||||
- Best visual quality
|
||||
- GPU-accelerated (fast)
|
||||
- Scales to any resolution
|
||||
- Industry standard approach
|
||||
|
||||
**Cons:**
|
||||
- More complex implementation
|
||||
- Requires WebGL knowledge
|
||||
|
||||
### Option B: Canvas with Gaussian Blur
|
||||
|
||||
Apply Gaussian blur to the existing canvas after rendering squares.
|
||||
|
||||
**Pros:**
|
||||
- Simple to implement
|
||||
- Works with existing code
|
||||
|
||||
**Cons:**
|
||||
- Blurs edges (coverage boundary becomes fuzzy)
|
||||
- Not true interpolation
|
||||
- Performance overhead
|
||||
|
||||
### Option C: Pre-interpolate on CPU
|
||||
|
||||
Generate more points by interpolating between existing ones before rendering.
|
||||
|
||||
**Pros:**
|
||||
- Simpler rendering
|
||||
- Works with existing canvas
|
||||
|
||||
**Cons:**
|
||||
- Much slower (CPU-bound)
|
||||
- Memory intensive
|
||||
- Not scalable
|
||||
|
||||
**DECISION: Implement Option A (WebGL Fragment Shader)**
|
||||
|
||||
---
|
||||
|
||||
## Implementation Plan
|
||||
|
||||
### Phase 1: WebGL Layer Setup
|
||||
|
||||
**File:** `frontend/src/components/map/WebGLCoverageLayer.tsx`
|
||||
|
||||
Create a new Leaflet layer that uses WebGL for rendering:
|
||||
|
||||
```typescript
|
||||
import { useEffect, useRef } from 'react';
|
||||
import { useMap } from 'react-leaflet';
|
||||
import L from 'leaflet';
|
||||
|
||||
interface CoveragePoint {
|
||||
lat: number;
|
||||
lon: number;
|
||||
rsrp: number;
|
||||
}
|
||||
|
||||
interface WebGLCoverageLayerProps {
|
||||
points: CoveragePoint[];
|
||||
opacity: number;
|
||||
minRsrp: number;
|
||||
maxRsrp: number;
|
||||
visible: boolean;
|
||||
}
|
||||
|
||||
export default function WebGLCoverageLayer({
|
||||
points,
|
||||
opacity,
|
||||
minRsrp,
|
||||
maxRsrp,
|
||||
visible
|
||||
}: WebGLCoverageLayerProps) {
|
||||
const map = useMap();
|
||||
const canvasRef = useRef<HTMLCanvasElement | null>(null);
|
||||
const glRef = useRef<WebGLRenderingContext | null>(null);
|
||||
const programRef = useRef<WebGLProgram | null>(null);
|
||||
|
||||
useEffect(() => {
|
||||
if (!visible || points.length === 0) return;
|
||||
|
||||
// Create canvas overlay
|
||||
const canvas = document.createElement('canvas');
|
||||
const container = map.getContainer();
|
||||
canvas.width = container.clientWidth;
|
||||
canvas.height = container.clientHeight;
|
||||
canvas.style.position = 'absolute';
|
||||
canvas.style.top = '0';
|
||||
canvas.style.left = '0';
|
||||
canvas.style.pointerEvents = 'none';
|
||||
canvas.style.zIndex = '400'; // Above tiles, below markers
|
||||
canvas.style.opacity = String(opacity);
|
||||
container.appendChild(canvas);
|
||||
canvasRef.current = canvas;
|
||||
|
||||
// Initialize WebGL
|
||||
const gl = canvas.getContext('webgl') || canvas.getContext('experimental-webgl');
|
||||
if (!gl) {
|
||||
console.error('WebGL not supported, falling back to canvas');
|
||||
return;
|
||||
}
|
||||
glRef.current = gl as WebGLRenderingContext;
|
||||
|
||||
// Setup shaders and render
|
||||
initShaders(gl as WebGLRenderingContext);
|
||||
render();
|
||||
|
||||
// Handle map move/zoom
|
||||
const onMove = () => render();
|
||||
map.on('move', onMove);
|
||||
map.on('zoom', onMove);
|
||||
map.on('resize', onResize);
|
||||
|
||||
return () => {
|
||||
map.off('move', onMove);
|
||||
map.off('zoom', onMove);
|
||||
map.off('resize', onResize);
|
||||
canvas.remove();
|
||||
};
|
||||
}, [points, visible, opacity, minRsrp, maxRsrp, map]);
|
||||
|
||||
// ... shader init and render functions
|
||||
}
|
||||
```
|
||||
|
||||
### Phase 2: WebGL Shaders
|
||||
|
||||
**Vertex Shader:**
|
||||
```glsl
|
||||
attribute vec2 a_position;
|
||||
varying vec2 v_texCoord;
|
||||
|
||||
void main() {
|
||||
gl_Position = vec4(a_position, 0.0, 1.0);
|
||||
v_texCoord = (a_position + 1.0) / 2.0;
|
||||
}
|
||||
```
|
||||
|
||||
**Fragment Shader (Bilinear Interpolation):**
|
||||
```glsl
|
||||
precision mediump float;
|
||||
|
||||
uniform sampler2D u_coverageTexture;
|
||||
uniform vec2 u_resolution;
|
||||
uniform vec4 u_bounds; // minLat, minLon, maxLat, maxLon
|
||||
uniform float u_minRsrp;
|
||||
uniform float u_maxRsrp;
|
||||
|
||||
varying vec2 v_texCoord;
|
||||
|
||||
// RSRP to color gradient (matches existing palette)
|
||||
vec3 rsrpToColor(float rsrp) {
|
||||
float t = clamp((rsrp - u_minRsrp) / (u_maxRsrp - u_minRsrp), 0.0, 1.0);
|
||||
|
||||
// Color stops: red -> orange -> yellow -> green -> cyan -> blue
|
||||
// Reversed: strong signal = green/cyan, weak = red/orange
|
||||
if (t < 0.2) {
|
||||
return mix(vec3(0.5, 0.0, 0.0), vec3(1.0, 0.0, 0.0), t / 0.2); // maroon -> red
|
||||
} else if (t < 0.4) {
|
||||
return mix(vec3(1.0, 0.0, 0.0), vec3(1.0, 0.5, 0.0), (t - 0.2) / 0.2); // red -> orange
|
||||
} else if (t < 0.6) {
|
||||
return mix(vec3(1.0, 0.5, 0.0), vec3(1.0, 1.0, 0.0), (t - 0.4) / 0.2); // orange -> yellow
|
||||
} else if (t < 0.8) {
|
||||
return mix(vec3(1.0, 1.0, 0.0), vec3(0.0, 1.0, 0.0), (t - 0.6) / 0.2); // yellow -> green
|
||||
} else {
|
||||
return mix(vec3(0.0, 1.0, 0.0), vec3(0.0, 1.0, 1.0), (t - 0.8) / 0.2); // green -> cyan
|
||||
}
|
||||
}
|
||||
|
||||
void main() {
|
||||
// Convert screen coords to geographic coords
|
||||
vec2 geoCoord = mix(u_bounds.xy, u_bounds.zw, v_texCoord);
|
||||
|
||||
// Sample coverage texture (contains RSRP values encoded as colors)
|
||||
vec4 sample = texture2D(u_coverageTexture, v_texCoord);
|
||||
|
||||
// Decode RSRP from texture (R channel = normalized RSRP)
|
||||
float rsrp = mix(u_minRsrp, u_maxRsrp, sample.r);
|
||||
|
||||
// Skip if no coverage (alpha = 0)
|
||||
if (sample.a < 0.1) {
|
||||
discard;
|
||||
}
|
||||
|
||||
vec3 color = rsrpToColor(rsrp);
|
||||
gl_FragColor = vec4(color, sample.a);
|
||||
}
|
||||
```
|
||||
|
||||
### Phase 3: Coverage Data → Texture
|
||||
|
||||
Convert coverage points array to a WebGL texture for GPU sampling:
|
||||
|
||||
```typescript
|
||||
function createCoverageTexture(
|
||||
gl: WebGLRenderingContext,
|
||||
points: CoveragePoint[],
|
||||
bounds: L.LatLngBounds,
|
||||
textureSize: number = 512
|
||||
): WebGLTexture {
|
||||
// Create a grid texture from sparse points
|
||||
const data = new Uint8Array(textureSize * textureSize * 4);
|
||||
|
||||
const minLat = bounds.getSouth();
|
||||
const maxLat = bounds.getNorth();
|
||||
const minLon = bounds.getWest();
|
||||
const maxLon = bounds.getEast();
|
||||
|
||||
// For each texture pixel, find nearest coverage point and interpolate
|
||||
for (let y = 0; y < textureSize; y++) {
|
||||
for (let x = 0; x < textureSize; x++) {
|
||||
const lat = minLat + (maxLat - minLat) * (y / textureSize);
|
||||
const lon = minLon + (maxLon - minLon) * (x / textureSize);
|
||||
|
||||
// Find nearest points and interpolate (IDW - Inverse Distance Weighting)
|
||||
const { value, weight } = interpolateIDW(points, lat, lon, 4);
|
||||
|
||||
const idx = (y * textureSize + x) * 4;
|
||||
if (weight > 0) {
|
||||
// Encode normalized RSRP in R channel, weight in A channel
|
||||
const normalized = (value - minRsrp) / (maxRsrp - minRsrp);
|
||||
data[idx] = Math.floor(normalized * 255); // R = RSRP
|
||||
data[idx + 1] = 0; // G = unused
|
||||
data[idx + 2] = 0; // B = unused
|
||||
data[idx + 3] = Math.floor(Math.min(weight, 1) * 255); // A = coverage mask
|
||||
} else {
|
||||
data[idx + 3] = 0; // No coverage
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
const texture = gl.createTexture();
|
||||
gl.bindTexture(gl.TEXTURE_2D, texture);
|
||||
gl.texImage2D(gl.TEXTURE_2D, 0, gl.RGBA, textureSize, textureSize, 0, gl.RGBA, gl.UNSIGNED_BYTE, data);
|
||||
|
||||
// Enable bilinear filtering for smooth interpolation
|
||||
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.LINEAR);
|
||||
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MAG_FILTER, gl.LINEAR);
|
||||
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_S, gl.CLAMP_TO_EDGE);
|
||||
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_T, gl.CLAMP_TO_EDGE);
|
||||
|
||||
return texture!;
|
||||
}
|
||||
|
||||
// Inverse Distance Weighting interpolation
|
||||
function interpolateIDW(
|
||||
points: CoveragePoint[],
|
||||
lat: number,
|
||||
lon: number,
|
||||
k: number = 4,
|
||||
power: number = 2
|
||||
): { value: number; weight: number } {
|
||||
// Find k nearest points
|
||||
const distances = points.map((p, i) => ({
|
||||
index: i,
|
||||
dist: Math.sqrt(Math.pow(p.lat - lat, 2) + Math.pow(p.lon - lon, 2))
|
||||
}));
|
||||
|
||||
distances.sort((a, b) => a.dist - b.dist);
|
||||
const nearest = distances.slice(0, k);
|
||||
|
||||
// If very close to a point, use its value directly
|
||||
if (nearest[0].dist < 0.0001) {
|
||||
return { value: points[nearest[0].index].rsrp, weight: 1 };
|
||||
}
|
||||
|
||||
// IDW formula: weighted average where weight = 1 / distance^power
|
||||
let sumWeights = 0;
|
||||
let sumValues = 0;
|
||||
|
||||
for (const n of nearest) {
|
||||
const w = 1 / Math.pow(n.dist, power);
|
||||
sumWeights += w;
|
||||
sumValues += w * points[n.index].rsrp;
|
||||
}
|
||||
|
||||
// Limit interpolation range (don't extrapolate too far from data)
|
||||
const maxDist = nearest[nearest.length - 1].dist;
|
||||
const coverage = maxDist < 0.01 ? 1 : Math.max(0, 1 - maxDist * 50);
|
||||
|
||||
return {
|
||||
value: sumValues / sumWeights,
|
||||
weight: coverage
|
||||
};
|
||||
}
|
||||
```
|
||||
|
||||
### Phase 4: Integration with Existing Code
|
||||
|
||||
**Modify:** `frontend/src/components/map/MapView.tsx`
|
||||
|
||||
Add toggle between old canvas layer and new WebGL layer:
|
||||
|
||||
```typescript
|
||||
import WebGLCoverageLayer from './WebGLCoverageLayer';
|
||||
|
||||
// In MapView component:
|
||||
const [useWebGL, setUseWebGL] = useState(true);
|
||||
|
||||
// In render:
|
||||
{useWebGL ? (
|
||||
<WebGLCoverageLayer
|
||||
points={coveragePoints}
|
||||
opacity={heatmapOpacity}
|
||||
minRsrp={-130}
|
||||
maxRsrp={-50}
|
||||
visible={showCoverage}
|
||||
/>
|
||||
) : (
|
||||
<GeographicHeatmap ... /> // Existing canvas implementation
|
||||
)}
|
||||
```
|
||||
|
||||
**Add setting:** `frontend/src/components/panels/SettingsPanel.tsx`
|
||||
|
||||
```typescript
|
||||
<div className="flex items-center justify-between">
|
||||
<span>Smooth Coverage (WebGL)</span>
|
||||
<Toggle
|
||||
checked={useWebGL}
|
||||
onChange={setUseWebGL}
|
||||
/>
|
||||
</div>
|
||||
```
|
||||
|
||||
### Phase 5: Performance Optimizations
|
||||
|
||||
1. **Texture Caching:** Only regenerate texture when coverage data changes
|
||||
2. **Resolution Scaling:** Use smaller texture on zoom out, larger on zoom in
|
||||
3. **Frustum Culling:** Don't render points outside visible bounds
|
||||
4. **Web Worker:** Move IDW interpolation to background thread
|
||||
|
||||
```typescript
|
||||
// Memoize texture generation
|
||||
const coverageTexture = useMemo(() => {
|
||||
if (!gl || points.length === 0) return null;
|
||||
return createCoverageTexture(gl, points, bounds, textureSize);
|
||||
}, [points, bounds, textureSize]);
|
||||
|
||||
// Dynamic texture size based on zoom
|
||||
const textureSize = useMemo(() => {
|
||||
const zoom = map.getZoom();
|
||||
if (zoom < 10) return 256;
|
||||
if (zoom < 14) return 512;
|
||||
return 1024;
|
||||
}, [map.getZoom()]);
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Files to Create/Modify
|
||||
|
||||
| File | Action | Description |
|
||||
|------|--------|-------------|
|
||||
| `frontend/src/components/map/WebGLCoverageLayer.tsx` | CREATE | New WebGL rendering component |
|
||||
| `frontend/src/components/map/shaders/coverage.vert` | CREATE | Vertex shader (optional, can inline) |
|
||||
| `frontend/src/components/map/shaders/coverage.frag` | CREATE | Fragment shader (optional, can inline) |
|
||||
| `frontend/src/components/map/MapView.tsx` | MODIFY | Add WebGL layer toggle |
|
||||
| `frontend/src/store/settings.ts` | MODIFY | Add useWebGL setting |
|
||||
| `frontend/src/components/panels/CoverageSettingsPanel.tsx` | MODIFY | Add WebGL toggle UI |
|
||||
|
||||
---
|
||||
|
||||
## Testing Checklist
|
||||
|
||||
### Visual Quality
|
||||
- [ ] No visible grid squares at any zoom level
|
||||
- [ ] Smooth color gradients between coverage points
|
||||
- [ ] Coverage boundary is smooth, not jagged
|
||||
- [ ] Colors match existing palette (weak = red, strong = cyan/green)
|
||||
- [ ] Opacity control works correctly
|
||||
|
||||
### Performance
|
||||
- [ ] 60 FPS during map pan/zoom
|
||||
- [ ] Initial render < 500ms for 6000 points
|
||||
- [ ] Memory usage reasonable (< 100MB for large coverage)
|
||||
- [ ] No GPU memory leaks on repeated calculations
|
||||
|
||||
### Compatibility
|
||||
- [ ] Works on systems without dedicated GPU (falls back gracefully)
|
||||
- [ ] Works in Chrome, Firefox, Edge
|
||||
- [ ] Works on both high-DPI and standard displays
|
||||
|
||||
### Integration
|
||||
- [ ] Toggle between WebGL and canvas modes works
|
||||
- [ ] Coverage data updates correctly after recalculation
|
||||
- [ ] Settings persist across sessions
|
||||
- [ ] No console errors or warnings
|
||||
|
||||
---
|
||||
|
||||
## Fallback Strategy
|
||||
|
||||
If WebGL fails to initialize:
|
||||
1. Log warning to console
|
||||
2. Fall back to existing canvas implementation
|
||||
3. Show toast notification to user
|
||||
|
||||
```typescript
|
||||
const gl = canvas.getContext('webgl');
|
||||
if (!gl) {
|
||||
console.warn('WebGL not available, using canvas fallback');
|
||||
setUseWebGL(false);
|
||||
toast.warning('WebGL not supported, using standard rendering');
|
||||
return;
|
||||
}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Success Criteria
|
||||
|
||||
1. **Visual:** Coverage looks like CloudRF/professional tools — smooth gradients, no grid
|
||||
2. **Performance:** Same or better than current canvas implementation
|
||||
3. **Reliability:** Graceful fallback if WebGL unavailable
|
||||
4. **UX:** User can toggle between modes in settings
|
||||
|
||||
---
|
||||
|
||||
## Additional Notes
|
||||
|
||||
### Color Gradient Reference
|
||||
|
||||
Current RSRP color mapping (from `colorGradient.ts`):
|
||||
```
|
||||
-130 dBm → Maroon (no service)
|
||||
-110 dBm → Red (very weak)
|
||||
-100 dBm → Orange (weak)
|
||||
-85 dBm → Yellow (fair)
|
||||
-70 dBm → Green (good)
|
||||
-50 dBm → Cyan (excellent)
|
||||
```
|
||||
|
||||
### Coordinate Systems
|
||||
|
||||
- **Geographic:** Latitude/Longitude (EPSG:4326)
|
||||
- **Screen:** Pixels from top-left
|
||||
- **WebGL:** Normalized device coordinates (-1 to 1)
|
||||
- **Texture:** UV coordinates (0 to 1)
|
||||
|
||||
All conversions must account for Web Mercator projection distortion.
|
||||
|
||||
---
|
||||
|
||||
## References
|
||||
|
||||
- WebGL Fundamentals: https://webglfundamentals.org/
|
||||
- Leaflet Custom Layers: https://leafletjs.com/examples/extending/extending-2-layers.html
|
||||
- IDW Interpolation: https://en.wikipedia.org/wiki/Inverse_distance_weighting
|
||||
- CloudRF visualization: https://cloudrf.com (for visual reference)
|
||||
|
||||
---
|
||||
|
||||
## Commit Message
|
||||
|
||||
```
|
||||
feat(coverage): WebGL smooth interpolation rendering
|
||||
|
||||
- Add WebGLCoverageLayer with GPU-accelerated rendering
|
||||
- Implement IDW interpolation for smooth gradients
|
||||
- Add toggle between WebGL and canvas modes
|
||||
- Graceful fallback for systems without WebGL support
|
||||
|
||||
Closes #coverage-interpolation
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
**Ready for Implementation!**
|
||||
|
||||
@@ -0,0 +1,439 @@
|
||||
# RFCP Iteration 3.4.0 — Large Radius Support (20-50km)
|
||||
|
||||
## Goal
|
||||
|
||||
Enable 50km radius calculations without OOM by implementing memory-efficient processing patterns.
|
||||
|
||||
**Current limitation:** > 10-20km radius causes OOM (5+ GB RAM usage)
|
||||
**Target:** 50km radius with < 4GB RAM peak
|
||||
|
||||
---
|
||||
|
||||
## Phase 1: Memory-Mapped Terrain
|
||||
|
||||
### 1.1 Terrain mmap Loading
|
||||
|
||||
Change terrain_service to use memory-mapped files instead of loading full arrays into RAM.
|
||||
|
||||
**File:** `backend/app/services/terrain_service.py`
|
||||
|
||||
```python
|
||||
# Before (loads ~25 MB per tile into RAM):
|
||||
terrain = np.fromfile(f, dtype='>i2').reshape((rows, cols))
|
||||
|
||||
# After (near-zero RAM, OS pages from disk):
|
||||
terrain = np.memmap(f, dtype='>i2', mode='r', shape=(rows, cols))
|
||||
```
|
||||
|
||||
**Expected impact:** -200-400 MB RAM per tile
|
||||
|
||||
### 1.2 Terrain Disk Cache
|
||||
|
||||
- Save downloaded .hgt files to persistent disk cache
|
||||
- Don't keep raw arrays in memory after initial processing
|
||||
- Implement LRU eviction if cache exceeds 2GB
|
||||
- Location: `~/.rfcp/terrain_cache/`
|
||||
|
||||
---
|
||||
|
||||
## Phase 2: Tile-Based Processing
|
||||
|
||||
### 2.1 Split Large Calculations
|
||||
|
||||
If radius > 10km, split calculation area into 5km sub-tiles.
|
||||
|
||||
**File:** `backend/app/services/coverage_service.py` (or new `tile_processor.py`)
|
||||
|
||||
```python
|
||||
def calculate_coverage_tiled(site, radius_m, resolution_m, settings):
|
||||
"""Tile-based calculation for large radius."""
|
||||
|
||||
# Small radius — use existing single-pass
|
||||
if radius_m <= 10000:
|
||||
return calculate_coverage_single(site, radius_m, resolution_m, settings)
|
||||
|
||||
# Large radius — split into tiles
|
||||
TILE_SIZE = 5000 # 5km tiles
|
||||
tiles = generate_tile_grid(site.lat, site.lon, radius_m, TILE_SIZE)
|
||||
|
||||
all_results = []
|
||||
|
||||
for i, tile in enumerate(tiles):
|
||||
log(f"Processing tile {i+1}/{len(tiles)}: {tile.bbox}")
|
||||
|
||||
# Load data for this tile only
|
||||
tile_terrain = load_terrain_for_bbox(tile.bbox)
|
||||
tile_buildings = load_buildings_for_bbox(tile.bbox)
|
||||
|
||||
# Calculate coverage for tile
|
||||
tile_points = generate_grid_for_tile(tile, resolution_m)
|
||||
tile_results = calculate_points(tile_points, site, settings,
|
||||
tile_terrain, tile_buildings)
|
||||
|
||||
all_results.extend(tile_results)
|
||||
|
||||
# Free memory
|
||||
del tile_terrain, tile_buildings
|
||||
gc.collect()
|
||||
|
||||
# Report progress
|
||||
progress = (i + 1) / len(tiles) * 100
|
||||
yield_progress(progress, f"Tile {i+1}/{len(tiles)}")
|
||||
|
||||
return merge_and_dedupe_results(all_results)
|
||||
|
||||
|
||||
def generate_tile_grid(center_lat, center_lon, radius_m, tile_size_m):
|
||||
"""Generate grid of tiles covering the calculation area."""
|
||||
tiles = []
|
||||
|
||||
# Calculate bbox of full area
|
||||
lat_delta = radius_m / 111000
|
||||
lon_delta = radius_m / (111000 * cos(radians(center_lat)))
|
||||
|
||||
# Generate tile grid
|
||||
n_tiles = ceil(radius_m * 2 / tile_size_m)
|
||||
|
||||
for i in range(n_tiles):
|
||||
for j in range(n_tiles):
|
||||
tile_bbox = calculate_tile_bbox(center_lat, center_lon,
|
||||
i, j, n_tiles, tile_size_m)
|
||||
|
||||
# Only include tiles that intersect with coverage circle
|
||||
if tile_intersects_circle(tile_bbox, center_lat, center_lon, radius_m):
|
||||
tiles.append(Tile(bbox=tile_bbox, index=(i, j)))
|
||||
|
||||
return tiles
|
||||
```
|
||||
|
||||
### 2.2 Progressive Results via WebSocket
|
||||
|
||||
Send results per-tile as they complete, so user sees coverage growing.
|
||||
|
||||
**File:** `backend/app/api/websocket.py`
|
||||
|
||||
```python
|
||||
async def calculate_coverage_ws(websocket, params):
|
||||
for tile_results in calculate_coverage_tiled_generator(params):
|
||||
# Send partial results
|
||||
await websocket.send_json({
|
||||
"type": "partial_results",
|
||||
"points": tile_results.points,
|
||||
"progress": tile_results.progress,
|
||||
"tile": tile_results.tile_index,
|
||||
"status": f"Tile {tile_results.tile_index} complete"
|
||||
})
|
||||
|
||||
# Final message
|
||||
await websocket.send_json({
|
||||
"type": "complete",
|
||||
"total_points": total_points,
|
||||
"computation_time": elapsed
|
||||
})
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Phase 3: SQLite Cache for OSM Data
|
||||
|
||||
### 3.1 Create Local Database
|
||||
|
||||
Replace in-memory OSM cache with SQLite database with spatial indexing.
|
||||
|
||||
**File:** `backend/app/services/cache_db.py` (NEW)
|
||||
|
||||
```python
|
||||
import sqlite3
|
||||
import json
|
||||
|
||||
class OSMCacheDB:
|
||||
def __init__(self, db_path="~/.rfcp/osm_cache.db"):
|
||||
self.conn = sqlite3.connect(db_path)
|
||||
self._init_tables()
|
||||
|
||||
def _init_tables(self):
|
||||
self.conn.executescript("""
|
||||
CREATE TABLE IF NOT EXISTS buildings (
|
||||
id INTEGER PRIMARY KEY,
|
||||
osm_id TEXT UNIQUE,
|
||||
lat REAL NOT NULL,
|
||||
lon REAL NOT NULL,
|
||||
height REAL DEFAULT 10.0,
|
||||
geometry TEXT, -- GeoJSON
|
||||
cell_key TEXT, -- grid cell for batch loading
|
||||
created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
|
||||
);
|
||||
|
||||
CREATE INDEX IF NOT EXISTS idx_buildings_lat ON buildings(lat);
|
||||
CREATE INDEX IF NOT EXISTS idx_buildings_lon ON buildings(lon);
|
||||
CREATE INDEX IF NOT EXISTS idx_buildings_cell ON buildings(cell_key);
|
||||
|
||||
CREATE TABLE IF NOT EXISTS vegetation (
|
||||
id INTEGER PRIMARY KEY,
|
||||
osm_id TEXT UNIQUE,
|
||||
lat REAL NOT NULL,
|
||||
lon REAL NOT NULL,
|
||||
type TEXT,
|
||||
geometry TEXT,
|
||||
cell_key TEXT
|
||||
);
|
||||
|
||||
CREATE INDEX IF NOT EXISTS idx_veg_lat ON vegetation(lat);
|
||||
CREATE INDEX IF NOT EXISTS idx_veg_lon ON vegetation(lon);
|
||||
|
||||
-- Metadata for cache invalidation
|
||||
CREATE TABLE IF NOT EXISTS cache_meta (
|
||||
cell_key TEXT PRIMARY KEY,
|
||||
data_type TEXT,
|
||||
fetched_at TIMESTAMP,
|
||||
item_count INTEGER
|
||||
);
|
||||
""")
|
||||
self.conn.commit()
|
||||
|
||||
def query_buildings_bbox(self, min_lat, max_lat, min_lon, max_lon, limit=20000):
|
||||
"""Query buildings within bounding box."""
|
||||
cursor = self.conn.execute("""
|
||||
SELECT osm_id, lat, lon, height, geometry
|
||||
FROM buildings
|
||||
WHERE lat BETWEEN ? AND ?
|
||||
AND lon BETWEEN ? AND ?
|
||||
LIMIT ?
|
||||
""", (min_lat, max_lat, min_lon, max_lon, limit))
|
||||
|
||||
return [self._row_to_building(row) for row in cursor]
|
||||
|
||||
def insert_buildings(self, buildings, cell_key):
|
||||
"""Bulk insert buildings from OSM fetch."""
|
||||
self.conn.executemany("""
|
||||
INSERT OR IGNORE INTO buildings
|
||||
(osm_id, lat, lon, height, geometry, cell_key)
|
||||
VALUES (?, ?, ?, ?, ?, ?)
|
||||
""", [
|
||||
(b['id'], b['lat'], b['lon'], b.get('height', 10),
|
||||
json.dumps(b.get('geometry')), cell_key)
|
||||
for b in buildings
|
||||
])
|
||||
self.conn.commit()
|
||||
|
||||
def is_cell_cached(self, cell_key, data_type, max_age_hours=24):
|
||||
"""Check if cell data is cached and fresh."""
|
||||
cursor = self.conn.execute("""
|
||||
SELECT fetched_at FROM cache_meta
|
||||
WHERE cell_key = ? AND data_type = ?
|
||||
AND fetched_at > datetime('now', ?)
|
||||
""", (cell_key, data_type, f'-{max_age_hours} hours'))
|
||||
|
||||
return cursor.fetchone() is not None
|
||||
```
|
||||
|
||||
### 3.2 Update OSM Client
|
||||
|
||||
Modify OSM client to use SQLite cache.
|
||||
|
||||
**File:** `backend/app/services/osm_client.py`
|
||||
|
||||
```python
|
||||
class OSMClient:
|
||||
def __init__(self):
|
||||
self.cache_db = OSMCacheDB()
|
||||
|
||||
def get_buildings(self, bbox, max_count=20000):
|
||||
min_lat, min_lon, max_lat, max_lon = bbox
|
||||
cell_key = self._bbox_to_cell_key(bbox)
|
||||
|
||||
# Check cache first
|
||||
if self.cache_db.is_cell_cached(cell_key, 'buildings'):
|
||||
return self.cache_db.query_buildings_bbox(
|
||||
min_lat, max_lat, min_lon, max_lon, max_count
|
||||
)
|
||||
|
||||
# Fetch from Overpass API
|
||||
buildings = self._fetch_from_overpass(bbox, 'buildings')
|
||||
|
||||
# Store in cache
|
||||
self.cache_db.insert_buildings(buildings, cell_key)
|
||||
|
||||
return buildings[:max_count]
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Phase 4: Worker Memory Optimization
|
||||
|
||||
### 4.1 Per-Tile Building Loading
|
||||
|
||||
Workers receive only tile bbox and query buildings themselves (or receive pre-filtered list).
|
||||
|
||||
```python
|
||||
def _pool_worker_tiled(args):
|
||||
"""Worker that loads buildings for its tile only."""
|
||||
tile_bbox, terrain_shm_refs, config = args
|
||||
|
||||
# Load only buildings for this tile
|
||||
cache_db = OSMCacheDB()
|
||||
buildings = cache_db.query_buildings_bbox(*tile_bbox, limit=5000)
|
||||
|
||||
# Much smaller memory footprint per worker
|
||||
# ...rest of calculation
|
||||
```
|
||||
|
||||
### 4.2 Adaptive Worker Count
|
||||
|
||||
Reduce workers for large radius to prevent combined memory explosion.
|
||||
|
||||
```python
|
||||
def get_worker_count_for_radius(radius_m, base_workers):
|
||||
"""Scale down workers for large calculations."""
|
||||
if radius_m > 30000:
|
||||
return min(base_workers, 2)
|
||||
elif radius_m > 20000:
|
||||
return min(base_workers, 3)
|
||||
elif radius_m > 10000:
|
||||
return min(base_workers, 4)
|
||||
return base_workers
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Phase 5: Frontend Progressive Rendering
|
||||
|
||||
### 5.1 Accumulate Partial Results
|
||||
|
||||
**File:** `frontend/src/store/coverage.ts`
|
||||
|
||||
```typescript
|
||||
interface CoverageState {
|
||||
points: CoveragePoint[];
|
||||
isCalculating: boolean;
|
||||
progress: number;
|
||||
// NEW:
|
||||
partialResults: CoveragePoint[];
|
||||
tilesCompleted: number;
|
||||
totalTiles: number;
|
||||
}
|
||||
|
||||
// Handle partial results
|
||||
case 'partial_results':
|
||||
set(state => ({
|
||||
partialResults: [...state.partialResults, ...message.points],
|
||||
progress: message.progress,
|
||||
tilesCompleted: state.tilesCompleted + 1
|
||||
}));
|
||||
break;
|
||||
|
||||
case 'complete':
|
||||
set(state => ({
|
||||
points: state.partialResults, // Finalize
|
||||
partialResults: [],
|
||||
isCalculating: false
|
||||
}));
|
||||
break;
|
||||
```
|
||||
|
||||
### 5.2 Incremental Heatmap Render
|
||||
|
||||
**File:** `frontend/src/components/map/CoverageHeatmap.tsx`
|
||||
|
||||
```typescript
|
||||
function CoverageHeatmap() {
|
||||
const { points, partialResults, isCalculating } = useCoverageStore();
|
||||
|
||||
// Show partial results while calculating
|
||||
const displayPoints = isCalculating ? partialResults : points;
|
||||
|
||||
// Throttle re-renders during streaming (every 500 points)
|
||||
const throttledPoints = useThrottle(displayPoints, 500);
|
||||
|
||||
return <HeatmapLayer points={throttledPoints} />;
|
||||
}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Implementation Order
|
||||
|
||||
### Priority 1 — Biggest Impact
|
||||
1. **Tile-based processing** (Phase 2.1) — enables large radius
|
||||
2. **SQLite cache** (Phase 3) — reduces memory, speeds up repeated calcs
|
||||
|
||||
### Priority 2 — Memory Reduction
|
||||
3. **Terrain mmap** (Phase 1.1) — easy win, minimal code change
|
||||
4. **Per-tile building loading** (Phase 4.1)
|
||||
|
||||
### Priority 3 — UX Improvement
|
||||
5. **Progressive WebSocket** (Phase 2.2)
|
||||
6. **Frontend streaming** (Phase 5)
|
||||
|
||||
### Priority 4 — Polish
|
||||
7. **Terrain disk cache** (Phase 1.2)
|
||||
8. **Adaptive worker count** (Phase 4.2)
|
||||
|
||||
---
|
||||
|
||||
## Success Criteria
|
||||
|
||||
| Radius | Max Time | Max RAM |
|
||||
|--------|----------|---------|
|
||||
| 20 km | < 3 min | < 3 GB |
|
||||
| 30 km | < 5 min | < 3.5 GB |
|
||||
| 50 km | < 10 min | < 4 GB |
|
||||
|
||||
- No OOM crashes at any radius up to 50km
|
||||
- Progressive results visible within 30s of starting
|
||||
- Cache reuse speeds up repeated calculations 5-10x
|
||||
|
||||
---
|
||||
|
||||
## Files to Modify
|
||||
|
||||
### Backend (Python)
|
||||
|
||||
| File | Changes |
|
||||
|------|---------|
|
||||
| `terrain_service.py` | mmap loading, disk cache |
|
||||
| `coverage_service.py` | tile-based routing |
|
||||
| `parallel_coverage_service.py` | adaptive workers |
|
||||
| `osm_client.py` | SQLite integration |
|
||||
| `websocket.py` | streaming results |
|
||||
| **NEW** `tile_processor.py` | tile generation & processing |
|
||||
| **NEW** `cache_db.py` | SQLite cache layer |
|
||||
|
||||
### Frontend (TypeScript)
|
||||
|
||||
| File | Changes |
|
||||
|------|---------|
|
||||
| `store/coverage.ts` | partial results handling |
|
||||
| `CoverageHeatmap.tsx` | incremental rendering |
|
||||
| `App.tsx` | progress for tiled calc |
|
||||
|
||||
---
|
||||
|
||||
## Testing
|
||||
|
||||
```bash
|
||||
# Test 20km radius
|
||||
curl -X POST http://localhost:8888/api/coverage/calculate \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{"radius": 20000, "resolution": 500, "preset": "standard"}'
|
||||
|
||||
# Monitor memory
|
||||
watch -n 1 'ps aux | grep rfcp-server | awk "{print \$6/1024\" MB\"}"'
|
||||
|
||||
# Test 50km radius
|
||||
curl -X POST http://localhost:8888/api/coverage/calculate \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{"radius": 50000, "resolution": 1000, "preset": "standard"}'
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Notes
|
||||
|
||||
- Tile size 5km is a balance — smaller = more overhead, larger = more memory
|
||||
- SQLite R-tree extension would be faster but requires compilation
|
||||
- For Rust version, all of this will be native and faster
|
||||
|
||||
---
|
||||
|
||||
*"Think in tiles, stream results, cache everything"* 🗺️
|
||||
1096
docs/devlog/gpu_supp/RFCP-Iteration-3.5.0-GPU-Acceleration.md
Normal file
1096
docs/devlog/gpu_supp/RFCP-Iteration-3.5.0-GPU-Acceleration.md
Normal file
File diff suppressed because it is too large
Load Diff
557
docs/devlog/gpu_supp/RFCP-Iteration-3.5.1-Bugfixes-Polish.md
Normal file
557
docs/devlog/gpu_supp/RFCP-Iteration-3.5.1-Bugfixes-Polish.md
Normal file
@@ -0,0 +1,557 @@
|
||||
# RFCP Iteration 3.5.1 — Bugfixes & Polish
|
||||
|
||||
## Overview
|
||||
|
||||
Focused bugfix and polish release addressing UI issues, coverage boundary accuracy, history improvements, and GPU indicator fixes discovered during 3.5.0 testing.
|
||||
|
||||
---
|
||||
|
||||
## 1. GPU — Detection Not Working + UI Overlap
|
||||
|
||||
### 1A. GPU Not Detected Despite Being Available
|
||||
|
||||
**Problem:** User has a laptop with DUAL GPUs (Intel integrated + NVIDIA discrete) but the app only shows "CPU (NumPy)". GPU acceleration is not working at all — no GPU option available in the device selector.
|
||||
|
||||
**Root cause investigation needed:**
|
||||
1. Check if CuPy is actually installed in the Python environment
|
||||
2. Check if CUDA toolkit is accessible from the app's runtime
|
||||
3. Check if PyOpenCL is installed (fallback for Intel GPU)
|
||||
4. The backend GPU detection may be failing silently
|
||||
|
||||
**Debug steps to add:**
|
||||
|
||||
```python
|
||||
# backend/app/services/gpu_backend.py — improve detection with logging
|
||||
|
||||
import logging
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
@classmethod
|
||||
def detect_backends(cls) -> list:
|
||||
backends = []
|
||||
|
||||
# Check NVIDIA CUDA
|
||||
try:
|
||||
import cupy as cp
|
||||
count = cp.cuda.runtime.getDeviceCount()
|
||||
logger.info(f"CUDA detected: {count} device(s)")
|
||||
for i in range(count):
|
||||
device = cp.cuda.Device(i)
|
||||
backends.append({...})
|
||||
except ImportError:
|
||||
logger.warning("CuPy not installed — run: pip install cupy-cuda12x")
|
||||
except Exception as e:
|
||||
logger.warning(f"CUDA detection failed: {e}")
|
||||
|
||||
# Check OpenCL (works with Intel, AMD, AND NVIDIA)
|
||||
try:
|
||||
import pyopencl as cl
|
||||
platforms = cl.get_platforms()
|
||||
logger.info(f"OpenCL detected: {len(platforms)} platform(s)")
|
||||
for platform in platforms:
|
||||
for device in platform.get_devices():
|
||||
logger.info(f" OpenCL device: {device.name}")
|
||||
backends.append({...})
|
||||
except ImportError:
|
||||
logger.warning("PyOpenCL not installed — run: pip install pyopencl")
|
||||
except Exception as e:
|
||||
logger.warning(f"OpenCL detection failed: {e}")
|
||||
|
||||
# Always log what was found
|
||||
logger.info(f"Total compute backends: {len(backends)} "
|
||||
f"({sum(1 for b in backends if b['type'] == 'cuda')} CUDA, "
|
||||
f"{sum(1 for b in backends if b['type'] == 'opencl')} OpenCL)")
|
||||
|
||||
# CPU always available
|
||||
backends.append({...cpu...})
|
||||
return backends
|
||||
```
|
||||
|
||||
**Installation check endpoint:**
|
||||
|
||||
```python
|
||||
# backend/app/api/routes/gpu.py — add diagnostic endpoint
|
||||
|
||||
@router.get("/diagnostics")
|
||||
async def gpu_diagnostics():
|
||||
"""Full GPU diagnostic info for troubleshooting."""
|
||||
diag = {
|
||||
"python_version": sys.version,
|
||||
"platform": platform.platform(),
|
||||
"cuda": {},
|
||||
"opencl": {},
|
||||
"numpy": {}
|
||||
}
|
||||
|
||||
# Check CuPy/CUDA
|
||||
try:
|
||||
import cupy
|
||||
diag["cuda"]["cupy_version"] = cupy.__version__
|
||||
diag["cuda"]["cuda_version"] = cupy.cuda.runtime.runtimeGetVersion()
|
||||
diag["cuda"]["device_count"] = cupy.cuda.runtime.getDeviceCount()
|
||||
for i in range(diag["cuda"]["device_count"]):
|
||||
d = cupy.cuda.Device(i)
|
||||
diag["cuda"][f"device_{i}"] = {
|
||||
"name": d.name,
|
||||
"compute_capability": d.compute_capability,
|
||||
"total_memory_mb": d.mem_info[1] // 1024 // 1024
|
||||
}
|
||||
except ImportError:
|
||||
diag["cuda"]["error"] = "CuPy not installed"
|
||||
diag["cuda"]["install_hint"] = "pip install cupy-cuda12x --break-system-packages"
|
||||
except Exception as e:
|
||||
diag["cuda"]["error"] = str(e)
|
||||
|
||||
# Check PyOpenCL
|
||||
try:
|
||||
import pyopencl as cl
|
||||
diag["opencl"]["pyopencl_version"] = cl.VERSION_TEXT
|
||||
for p in cl.get_platforms():
|
||||
platform_info = {"name": p.name, "devices": []}
|
||||
for d in p.get_devices():
|
||||
platform_info["devices"].append({
|
||||
"name": d.name,
|
||||
"type": cl.device_type.to_string(d.type),
|
||||
"memory_mb": d.global_mem_size // 1024 // 1024,
|
||||
"compute_units": d.max_compute_units
|
||||
})
|
||||
diag["opencl"][p.name] = platform_info
|
||||
except ImportError:
|
||||
diag["opencl"]["error"] = "PyOpenCL not installed"
|
||||
diag["opencl"]["install_hint"] = "pip install pyopencl"
|
||||
except Exception as e:
|
||||
diag["opencl"]["error"] = str(e)
|
||||
|
||||
# Check NumPy
|
||||
import numpy as np
|
||||
diag["numpy"]["version"] = np.__version__
|
||||
|
||||
return diag
|
||||
```
|
||||
|
||||
**Frontend — show diagnostic info:**
|
||||
|
||||
```typescript
|
||||
// In GPUIndicator.tsx — when only CPU detected, show help
|
||||
|
||||
{devices.length === 1 && devices[0].type === 'cpu' && (
|
||||
<div className="text-xs text-yellow-400 mt-2 p-2 bg-yellow-900/20 rounded">
|
||||
⚠ No GPU detected.
|
||||
<button
|
||||
onClick={() => fetchDiagnostics()}
|
||||
className="underline ml-1"
|
||||
>
|
||||
Run diagnostics
|
||||
</button>
|
||||
</div>
|
||||
)}
|
||||
```
|
||||
|
||||
**Auto-install hint in UI:**
|
||||
```
|
||||
⚠ No GPU detected
|
||||
|
||||
For NVIDIA GPU: pip install cupy-cuda12x
|
||||
For Intel/AMD GPU: pip install pyopencl
|
||||
|
||||
[Run Diagnostics] [Install Guide]
|
||||
```
|
||||
|
||||
**Dual GPU handling (Intel + NVIDIA laptop):**
|
||||
```python
|
||||
# When both Intel (OpenCL) and NVIDIA (CUDA) found:
|
||||
# - List both in device selector
|
||||
# - Default to NVIDIA CUDA (faster)
|
||||
# - Allow user to switch
|
||||
# - Intel iGPU via OpenCL is still ~3-5x faster than CPU
|
||||
|
||||
# Example device list for dual GPU laptop:
|
||||
# 1. ⚡ NVIDIA GeForce RTX 4060 (CUDA) — 8 GB [DEFAULT]
|
||||
# 2. ⚡ Intel UHD Graphics 770 (OpenCL) — shared memory
|
||||
# 3. 💻 CPU (16 cores)
|
||||
```
|
||||
|
||||
### 1B. GPU Indicator UI — Fix Overlap with Fit Button
|
||||
|
||||
**Problem:** GPU device dropdown overlaps with the "Fit" button in top-right corner.
|
||||
|
||||
**Solution:**
|
||||
- Keep compact "⚡ CPU" badge in header
|
||||
- Dropdown opens to the LEFT or DOWNWARD, not overlapping map controls
|
||||
- Proper z-index and positioning
|
||||
- Shorter labels: "CPU" not "CPU (NumPy)"
|
||||
|
||||
**Files:**
|
||||
- `frontend/src/components/ui/GPUIndicator.tsx`
|
||||
- `backend/app/services/gpu_backend.py`
|
||||
- `backend/app/api/routes/gpu.py`
|
||||
|
||||
---
|
||||
|
||||
## 2. Coverage Boundary — Improve Accuracy
|
||||
|
||||
**Problem:** Current boundary shows a rough circle/ellipse shape that doesn't follow actual coverage contour.
|
||||
|
||||
**Current behavior:** Boundary seems to be based on simple distance radius rather than actual RSRP threshold contour.
|
||||
|
||||
**Expected behavior:** Boundary should follow the actual -100 dBm (or configured threshold) contour line — an irregular shape that follows terrain, buildings, vegetation shadows.
|
||||
|
||||
**Solution:**
|
||||
|
||||
```python
|
||||
# Backend approach: Generate contour from actual RSRP grid
|
||||
|
||||
import numpy as np
|
||||
from scipy.ndimage import binary_dilation, binary_erosion
|
||||
from shapely.geometry import MultiPoint
|
||||
from shapely.ops import unary_union
|
||||
|
||||
def calculate_coverage_boundary(points: list, threshold_dbm: float = -100) -> list:
|
||||
"""
|
||||
Calculate coverage boundary as convex hull of points above threshold.
|
||||
Returns list of [lat, lon] coordinates forming the boundary polygon.
|
||||
"""
|
||||
# Filter points above threshold
|
||||
valid_points = [(p['lat'], p['lon']) for p in points if p['rsrp'] >= threshold_dbm]
|
||||
|
||||
if len(valid_points) < 3:
|
||||
return []
|
||||
|
||||
# Create concave hull (alpha shape) for realistic boundary
|
||||
# Concave hull follows the actual shape better than convex hull
|
||||
from shapely.geometry import MultiPoint
|
||||
multi_point = MultiPoint(valid_points)
|
||||
|
||||
# Alpha shape — adjust alpha for detail level
|
||||
# Higher alpha = more detailed (but slower)
|
||||
boundary = concave_hull(multi_point, ratio=0.3)
|
||||
|
||||
if boundary.is_empty:
|
||||
return []
|
||||
|
||||
# Simplify to reduce points (tolerance in degrees ≈ 100m)
|
||||
simplified = boundary.simplify(0.001)
|
||||
|
||||
# Return as coordinate list
|
||||
coords = list(simplified.exterior.coords)
|
||||
return [[lat, lon] for lat, lon in coords]
|
||||
```
|
||||
|
||||
```python
|
||||
# Alternative: Grid-based contour approach
|
||||
|
||||
def calculate_boundary_from_grid(
|
||||
grid_points: list,
|
||||
threshold_dbm: float,
|
||||
grid_resolution_m: float
|
||||
) -> list:
|
||||
"""
|
||||
Create boundary by finding edge cells of coverage area.
|
||||
More accurate than hull — follows actual coverage gaps.
|
||||
"""
|
||||
import numpy as np
|
||||
|
||||
# Build 2D RSRP grid
|
||||
lats = sorted(set(p['lat'] for p in grid_points))
|
||||
lons = sorted(set(p['lon'] for p in grid_points))
|
||||
|
||||
grid = np.full((len(lats), len(lons)), np.nan)
|
||||
lat_idx = {lat: i for i, lat in enumerate(lats)}
|
||||
lon_idx = {lon: i for i, lon in enumerate(lons)}
|
||||
|
||||
for p in grid_points:
|
||||
i = lat_idx[p['lat']]
|
||||
j = lon_idx[p['lon']]
|
||||
grid[i, j] = p['rsrp']
|
||||
|
||||
# Binary mask: above threshold
|
||||
mask = grid >= threshold_dbm
|
||||
|
||||
# Find boundary: dilate - original = edge cells
|
||||
dilated = binary_dilation(mask)
|
||||
boundary_mask = dilated & ~mask
|
||||
|
||||
# Extract boundary coordinates
|
||||
boundary_coords = []
|
||||
for i in range(len(lats)):
|
||||
for j in range(len(lons)):
|
||||
if boundary_mask[i, j]:
|
||||
boundary_coords.append([lats[i], lons[j]])
|
||||
|
||||
# Order points for polygon (traveling salesman approximate)
|
||||
if len(boundary_coords) > 2:
|
||||
ordered = order_boundary_points(boundary_coords)
|
||||
return ordered
|
||||
|
||||
return boundary_coords
|
||||
```
|
||||
|
||||
**Frontend changes:**
|
||||
- Receive boundary polygon from backend (already calculated with results)
|
||||
- Or calculate client-side from grid points
|
||||
- Render as Leaflet polygon with dashed white stroke
|
||||
- Should follow actual coverage shape, not circular approximation
|
||||
|
||||
**Files:**
|
||||
- `backend/app/services/coverage_service.py` — add boundary calculation
|
||||
- `frontend/src/components/map/CoverageBoundary.tsx` — render real contour
|
||||
|
||||
---
|
||||
|
||||
## 3. Session History — Show Propagation Parameters
|
||||
|
||||
**Problem:** History entries only show preset, points, radius, resolution. Missing propagation settings used.
|
||||
|
||||
**Solution:** Save full propagation config snapshot with each history entry.
|
||||
|
||||
```typescript
|
||||
// frontend/src/store/calcHistory.ts
|
||||
|
||||
interface HistoryEntry {
|
||||
id: string;
|
||||
timestamp: Date;
|
||||
computationTime: number;
|
||||
preset: string;
|
||||
radius: number;
|
||||
resolution: number;
|
||||
totalPoints: number;
|
||||
|
||||
// Coverage results
|
||||
coverage: {
|
||||
excellent: number; // percentage
|
||||
good: number;
|
||||
fair: number;
|
||||
weak: number;
|
||||
};
|
||||
avgRsrp: number;
|
||||
rangeMin: number;
|
||||
rangeMax: number;
|
||||
|
||||
// NEW: Propagation parameters snapshot
|
||||
propagation: {
|
||||
modelsUsed: string[]; // ["Free-Space", "terrain_los", ...]
|
||||
modelCount: number; // 12
|
||||
frequency: number; // 2100 MHz
|
||||
txPower: number; // 46 dBm
|
||||
antennaGain: number; // 15 dBi
|
||||
antennaHeight: number; // 10 m
|
||||
|
||||
// Environment
|
||||
season: string; // "Winter (30%)"
|
||||
temperature: string; // "15°C (mild)"
|
||||
humidity: string; // "50% (normal)"
|
||||
rainConditions: string; // "Light Rain"
|
||||
indoorCoverage: string; // "Medium Building (brick)"
|
||||
|
||||
// Margins
|
||||
fadingMargin: number; // 0 dB
|
||||
|
||||
// Atmospheric
|
||||
atmosphericAbsorption: boolean;
|
||||
};
|
||||
|
||||
// Site config
|
||||
sites: number; // 2
|
||||
sectors: number; // total sectors
|
||||
}
|
||||
```
|
||||
|
||||
**Display in History panel:**
|
||||
|
||||
```typescript
|
||||
// Expanded history entry shows propagation details
|
||||
|
||||
<div className="history-entry-expanded">
|
||||
{/* Existing: time, points, coverage bars */}
|
||||
|
||||
{/* NEW: Propagation summary (collapsed by default) */}
|
||||
<details className="mt-2">
|
||||
<summary className="text-xs text-gray-400 cursor-pointer hover:text-gray-300">
|
||||
▸ Propagation: {entry.propagation.modelCount} models, {entry.propagation.frequency} MHz
|
||||
</summary>
|
||||
<div className="mt-1 pl-3 text-xs text-gray-500 space-y-0.5">
|
||||
<div>TX: {entry.propagation.txPower} dBm, Gain: {entry.propagation.antennaGain} dBi</div>
|
||||
<div>Height: {entry.propagation.antennaHeight}m</div>
|
||||
<div>Environment: {entry.propagation.season}, {entry.propagation.rainConditions}</div>
|
||||
<div>Indoor: {entry.propagation.indoorCoverage}</div>
|
||||
{entry.propagation.fadingMargin > 0 && (
|
||||
<div>Fading margin: {entry.propagation.fadingMargin} dB</div>
|
||||
)}
|
||||
<div className="flex flex-wrap gap-1 mt-1">
|
||||
{entry.propagation.modelsUsed.map(model => (
|
||||
<span key={model} className="px-1 py-0.5 bg-slate-700 rounded text-[10px]">
|
||||
{model}
|
||||
</span>
|
||||
))}
|
||||
</div>
|
||||
</div>
|
||||
</details>
|
||||
</div>
|
||||
```
|
||||
|
||||
**Files:**
|
||||
- `frontend/src/store/calcHistory.ts` — extend HistoryEntry type, save propagation
|
||||
- `frontend/src/components/panels/HistoryPanel.tsx` — show expandable propagation details
|
||||
- `backend/app/api/websocket.py` — include propagation config in result message
|
||||
- `backend/app/services/coverage_service.py` — return config snapshot with results
|
||||
|
||||
---
|
||||
|
||||
## 4. Results Popup — Show Propagation Summary
|
||||
|
||||
**Problem:** Calculation Complete popup shows time, points, coverage bars — but not which models were used.
|
||||
|
||||
**Solution:** Add compact propagation info to results popup.
|
||||
|
||||
```typescript
|
||||
// frontend/src/components/ui/ResultsPopup.tsx
|
||||
|
||||
// Add below coverage bars:
|
||||
<div className="mt-2 text-xs text-gray-400">
|
||||
<span>{result.modelsUsed?.length || 0} models</span>
|
||||
<span className="mx-1">•</span>
|
||||
<span>{result.frequency} MHz</span>
|
||||
{result.fadingMargin > 0 && (
|
||||
<>
|
||||
<span className="mx-1">•</span>
|
||||
<span>FM: {result.fadingMargin} dB</span>
|
||||
</>
|
||||
)}
|
||||
{result.indoorCoverage && result.indoorCoverage !== 'none' && (
|
||||
<>
|
||||
<span className="mx-1">•</span>
|
||||
<span>Indoor: {result.indoorCoverage}</span>
|
||||
</>
|
||||
)}
|
||||
</div>
|
||||
```
|
||||
|
||||
**Files:**
|
||||
- `frontend/src/components/ui/ResultsPopup.tsx`
|
||||
|
||||
---
|
||||
|
||||
## 5. Batch Frequency Change (from 3.5.0 backlog)
|
||||
|
||||
**Problem:** To compare coverage at different frequencies, user must edit each sector manually.
|
||||
|
||||
**Solution:** Quick-change buttons in toolbar or Coverage Settings.
|
||||
|
||||
```typescript
|
||||
// frontend/src/components/panels/BatchOperations.tsx (NEW)
|
||||
|
||||
const QUICK_BANDS = [
|
||||
{ freq: 700, label: '700', band: 'B28', color: 'text-red-400' },
|
||||
{ freq: 800, label: '800', band: 'B20', color: 'text-orange-400' },
|
||||
{ freq: 900, label: '900', band: 'B8', color: 'text-yellow-400' },
|
||||
{ freq: 1800, label: '1800', band: 'B3', color: 'text-green-400' },
|
||||
{ freq: 2100, label: '2100', band: 'B1', color: 'text-blue-400' },
|
||||
{ freq: 2600, label: '2600', band: 'B7', color: 'text-purple-400' },
|
||||
{ freq: 3500, label: '3500', band: 'n78', color: 'text-pink-400' },
|
||||
];
|
||||
|
||||
export function BatchFrequencyChange() {
|
||||
return (
|
||||
<div className="p-3 border-t border-slate-700">
|
||||
<h4 className="text-xs font-semibold text-gray-400 mb-2">
|
||||
SET ALL SECTORS
|
||||
</h4>
|
||||
<div className="flex flex-wrap gap-1">
|
||||
{QUICK_BANDS.map(b => (
|
||||
<button
|
||||
key={b.freq}
|
||||
onClick={() => setAllSectorsFrequency(b.freq)}
|
||||
className="px-2 py-1 text-xs bg-slate-700 hover:bg-slate-600 rounded"
|
||||
title={`${b.band} — ${b.freq} MHz`}
|
||||
>
|
||||
<span className={b.color}>{b.label}</span>
|
||||
</button>
|
||||
))}
|
||||
</div>
|
||||
</div>
|
||||
);
|
||||
}
|
||||
```
|
||||
|
||||
**Location:** Below site list, above Coverage Settings.
|
||||
|
||||
**Files:**
|
||||
- `frontend/src/components/panels/BatchOperations.tsx` (NEW)
|
||||
- `frontend/src/store/sites.ts` — add `setAllSectorsFrequency()` action
|
||||
|
||||
---
|
||||
|
||||
## 6. Minor UI Fixes
|
||||
|
||||
### 6.1 Terrain Profile — Click Propagation (verify fix)
|
||||
- Verify that clicking "Terrain Profile" button no longer adds ruler point
|
||||
- If still broken: ensure e.stopPropagation() AND e.preventDefault() on button
|
||||
|
||||
### 6.2 GPU Indicator — Shorter Label
|
||||
- Current: "CPU (NumPy)" — too long
|
||||
- Should be: "CPU" or "⚡ CPU"
|
||||
- When GPU active: "⚡ RTX 4060" (short device name)
|
||||
|
||||
### 6.3 ~~Coordinate Display — Show Elevation~~ ✅ WORKS
|
||||
- Elevation loads on hover with delay — NOT a bug
|
||||
- Shows "Elev: 380m ASL" after holding cursor on map
|
||||
- No fix needed
|
||||
|
||||
---
|
||||
|
||||
## Implementation Order
|
||||
|
||||
### Priority 1 — Quick Fixes (30 min)
|
||||
- [ ] GPU indicator positioning (no overlap with Fit)
|
||||
- [ ] GPU detection — install CuPy/PyOpenCL, diagnostics endpoint
|
||||
- [ ] Terrain Profile click fix (verify)
|
||||
|
||||
### Priority 2 — History Enhancement (1 hour)
|
||||
- [ ] Extend HistoryEntry with propagation params
|
||||
- [ ] Save propagation snapshot on calculation complete
|
||||
- [ ] Expandable propagation details in History panel
|
||||
- [ ] Results popup — show model count + frequency
|
||||
|
||||
### Priority 3 — Coverage Boundary (1-2 hours)
|
||||
- [ ] Implement contour-based boundary from actual RSRP grid
|
||||
- [ ] Replace circular approximation with real coverage shape
|
||||
- [ ] Test with multi-site calculations
|
||||
- [ ] Smooth boundary line (simplify polygon)
|
||||
|
||||
### Priority 4 — Batch Frequency (30 min)
|
||||
- [ ] BatchOperations component
|
||||
- [ ] setAllSectorsFrequency store action
|
||||
- [ ] Wire into sidebar panel
|
||||
|
||||
---
|
||||
|
||||
## Success Criteria
|
||||
|
||||
- [ ] GPU indicator does not overlap with any map controls
|
||||
- [ ] Coverage boundary follows actual coverage shape (not circular)
|
||||
- [ ] History entries show expandable propagation parameters
|
||||
- [ ] Results popup shows model count and frequency
|
||||
- [ ] Batch frequency change updates all sectors at once
|
||||
- [ ] Terrain Profile button click doesn't add ruler point
|
||||
- [ ] Elevation displays correctly in bottom-left
|
||||
|
||||
---
|
||||
|
||||
## Files Summary
|
||||
|
||||
### New Files
|
||||
- `frontend/src/components/panels/BatchOperations.tsx`
|
||||
|
||||
### Modified Files
|
||||
- `frontend/src/components/ui/GPUIndicator.tsx` — fix position/overlap
|
||||
- `frontend/src/components/map/CoverageBoundary.tsx` — real contour
|
||||
- `frontend/src/components/ui/ResultsPopup.tsx` — propagation info
|
||||
- `frontend/src/store/calcHistory.ts` — extended HistoryEntry
|
||||
- `frontend/src/components/panels/HistoryPanel.tsx` — expandable details
|
||||
- `frontend/src/store/sites.ts` — batch frequency action
|
||||
- `backend/app/services/coverage_service.py` — boundary calculation, config snapshot
|
||||
- `backend/app/api/websocket.py` — include config in results
|
||||
|
||||
---
|
||||
|
||||
*"Polish makes the difference between a tool and a product"* ✨
|
||||
504
docs/devlog/gpu_supp/RFCP-Iteration-3.5.2-Native-GPU-Polish.md
Normal file
504
docs/devlog/gpu_supp/RFCP-Iteration-3.5.2-Native-GPU-Polish.md
Normal file
@@ -0,0 +1,504 @@
|
||||
# RFCP — Iteration 3.5.2: Native Backend + GPU Fix + UI Polish
|
||||
|
||||
## Overview
|
||||
|
||||
Fix critical architecture issues: GPU indicator dropdown broken, GPU acceleration not working
|
||||
(CuPy in wrong Python environment), and prepare path to remove WSL2 dependency for end users.
|
||||
Plus UI polish items carried over from 3.5.1.
|
||||
|
||||
**Priority:** GPU fixes first, then UI polish, then native Windows exploration.
|
||||
|
||||
---
|
||||
|
||||
## CRITICAL CONTEXT
|
||||
|
||||
### Current Architecture Problem
|
||||
|
||||
```
|
||||
RFCP.exe (Electron, Windows)
|
||||
└── launches backend via WSL2:
|
||||
python3 -m uvicorn app.main:app --host 0.0.0.0 --port 8090
|
||||
└── /usr/bin/python3 (WSL2 system Python 3.12)
|
||||
└── NO venv, NO CuPy installed
|
||||
|
||||
User installed CuPy in Windows Python → backend doesn't see it.
|
||||
User installed CuPy in WSL system Python → needs --break-system-packages
|
||||
```
|
||||
|
||||
### GPU Hardware (Confirmed Working)
|
||||
|
||||
```
|
||||
nvidia-smi output (from WSL2):
|
||||
NVIDIA GeForce RTX 4060 Laptop GPU
|
||||
Driver: 581.42 (Windows) / 580.95.02 (WSL2)
|
||||
CUDA: 13.0
|
||||
VRAM: 8188 MiB
|
||||
GPU passthrough: WORKING ✅
|
||||
```
|
||||
|
||||
### Files to Reference
|
||||
|
||||
```
|
||||
backend/app/services/gpu_backend.py — GPUManager class
|
||||
backend/app/api/routes/gpu.py — GPU API endpoints
|
||||
frontend/src/components/ui/GPUIndicator.tsx — GPU badge/dropdown
|
||||
desktop/ — Electron app source
|
||||
installer/ — Build scripts
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Task 1: Fix GPU Indicator Dropdown Z-Index (Priority 1 — 10 min)
|
||||
|
||||
### Problem
|
||||
GPU dropdown WORKS (opens on click, shows diagnostics, install hints) but renders
|
||||
BEHIND the right sidebar panel. The sidebar (Sites, Coverage Settings) has higher
|
||||
z-index than the GPU dropdown, so the dropdown is invisible/hidden underneath.
|
||||
|
||||
See screenshots: dropdown is partially visible only when sidebar is made very narrow.
|
||||
It shows: "COMPUTE DEVICES", "CPU (NumPy)", install hints, "Run Diagnostics",
|
||||
and even diagnostics JSON — all working but hidden behind sidebar.
|
||||
|
||||
### Root Cause
|
||||
GPUIndicator dropdown z-index is lower than the right sidebar panel z-index.
|
||||
|
||||
### Solution
|
||||
|
||||
In `GPUIndicator.tsx` — find the dropdown container div and set z-index
|
||||
higher than the sidebar:
|
||||
|
||||
```tsx
|
||||
{isOpen && (
|
||||
<div
|
||||
className="absolute top-full mt-1 bg-dark-surface border border-dark-border
|
||||
rounded-lg shadow-2xl p-3 min-w-[300px]"
|
||||
style={{ zIndex: 9999 }} // MUST be above sidebar (which is ~z-50 or z-auto)
|
||||
>
|
||||
...
|
||||
</div>
|
||||
)}
|
||||
```
|
||||
|
||||
**Key requirements:**
|
||||
1. `z-index: 9999` (or at minimum higher than sidebar)
|
||||
2. Position: dropdown should open to the LEFT (toward center of screen)
|
||||
to avoid being cut off by right edge
|
||||
3. `right-0` on the absolute positioning (anchored to right edge of badge)
|
||||
|
||||
**Alternative approach** — use Tailwind z-index:
|
||||
```tsx
|
||||
className="absolute top-full right-0 mt-1 z-[9999] ..."
|
||||
```
|
||||
|
||||
**Also check:** The parent container of GPUIndicator might need `position: relative`
|
||||
for absolute positioning to work correctly against the right sidebar.
|
||||
|
||||
### Testing
|
||||
- [ ] Click "CPU" badge → dropdown appears ABOVE the sidebar
|
||||
- [ ] Full dropdown visible: devices, install hints, diagnostics
|
||||
- [ ] Dropdown doesn't get cut off on right side
|
||||
- [ ] Click outside → dropdown closes
|
||||
- [ ] Dropdown works at any window width
|
||||
|
||||
---
|
||||
|
||||
## Task 2: Install CuPy in WSL Backend (Priority 1 — 10 min)
|
||||
|
||||
### Problem
|
||||
CuPy installed in Windows Python, but backend runs in WSL2 system Python.
|
||||
|
||||
### Solution
|
||||
|
||||
Add a startup check in the backend that detects missing GPU packages
|
||||
and provides clear instructions. Also, the Electron app should try to
|
||||
install dependencies on first launch.
|
||||
|
||||
**Step 1: Backend startup GPU check**
|
||||
|
||||
In `backend/app/main.py`, add on startup:
|
||||
|
||||
```python
|
||||
@app.on_event("startup")
|
||||
async def check_gpu_availability():
|
||||
"""Log GPU status on startup for debugging."""
|
||||
import logging
|
||||
logger = logging.getLogger("rfcp.gpu")
|
||||
|
||||
# Check CuPy
|
||||
try:
|
||||
import cupy as cp
|
||||
device_count = cp.cuda.runtime.getDeviceCount()
|
||||
if device_count > 0:
|
||||
name = cp.cuda.Device(0).name
|
||||
mem = cp.cuda.Device(0).mem_info[1] // 1024 // 1024
|
||||
logger.info(f"✅ GPU detected: {name} ({mem} MB VRAM)")
|
||||
logger.info(f" CuPy {cp.__version__}, CUDA devices: {device_count}")
|
||||
else:
|
||||
logger.warning("⚠️ CuPy installed but no CUDA devices found")
|
||||
except ImportError:
|
||||
logger.warning("⚠️ CuPy not installed — GPU acceleration disabled")
|
||||
logger.warning(" Install: pip install cupy-cuda12x --break-system-packages")
|
||||
except Exception as e:
|
||||
logger.warning(f"⚠️ CuPy error: {e}")
|
||||
|
||||
# Check PyOpenCL
|
||||
try:
|
||||
import pyopencl as cl
|
||||
platforms = cl.get_platforms()
|
||||
for p in platforms:
|
||||
for d in p.get_devices():
|
||||
logger.info(f"✅ OpenCL device: {d.name.strip()}")
|
||||
except ImportError:
|
||||
logger.info("ℹ️ PyOpenCL not installed (optional)")
|
||||
except Exception:
|
||||
pass
|
||||
```
|
||||
|
||||
**Step 2: GPU diagnostics endpoint enhancement**
|
||||
|
||||
Enhance `/api/gpu/diagnostics` to return install commands:
|
||||
|
||||
```python
|
||||
@router.get("/diagnostics")
|
||||
async def gpu_diagnostics():
|
||||
import platform, sys
|
||||
|
||||
diagnostics = {
|
||||
"python": sys.version,
|
||||
"platform": platform.platform(),
|
||||
"executable": sys.executable,
|
||||
"is_wsl": "microsoft" in platform.release().lower(),
|
||||
"cuda_available": False,
|
||||
"opencl_available": False,
|
||||
"install_hint": "",
|
||||
"devices": []
|
||||
}
|
||||
|
||||
# Check nvidia-smi
|
||||
try:
|
||||
import subprocess
|
||||
result = subprocess.run(
|
||||
["nvidia-smi", "--query-gpu=name,memory.total", "--format=csv,noheader"],
|
||||
capture_output=True, text=True, timeout=5
|
||||
)
|
||||
if result.returncode == 0:
|
||||
diagnostics["nvidia_smi"] = result.stdout.strip()
|
||||
except:
|
||||
diagnostics["nvidia_smi"] = "not found"
|
||||
|
||||
# Check CuPy
|
||||
try:
|
||||
import cupy
|
||||
diagnostics["cupy_version"] = cupy.__version__
|
||||
diagnostics["cuda_available"] = True
|
||||
count = cupy.cuda.runtime.getDeviceCount()
|
||||
for i in range(count):
|
||||
d = cupy.cuda.Device(i)
|
||||
diagnostics["devices"].append({
|
||||
"id": i,
|
||||
"name": d.name,
|
||||
"memory_mb": d.mem_info[1] // 1024 // 1024,
|
||||
"backend": "CUDA"
|
||||
})
|
||||
except ImportError:
|
||||
if diagnostics.get("is_wsl"):
|
||||
diagnostics["install_hint"] = "pip3 install cupy-cuda12x --break-system-packages"
|
||||
else:
|
||||
diagnostics["install_hint"] = "pip install cupy-cuda12x"
|
||||
|
||||
return diagnostics
|
||||
```
|
||||
|
||||
**Step 3: Frontend shows diagnostics clearly**
|
||||
|
||||
In GPUIndicator dropdown, show:
|
||||
```
|
||||
⚠ No GPU detected
|
||||
|
||||
Your system: WSL2 + NVIDIA RTX 4060
|
||||
|
||||
To enable GPU acceleration:
|
||||
┌─────────────────────────────────────────────┐
|
||||
│ pip3 install cupy-cuda12x │
|
||||
│ --break-system-packages │
|
||||
└─────────────────────────────────────────────┘
|
||||
Then restart RFCP.
|
||||
|
||||
[Copy Command] [Run Diagnostics]
|
||||
```
|
||||
|
||||
### Testing
|
||||
- [ ] Backend startup logs GPU status
|
||||
- [ ] /api/gpu/diagnostics returns WSL detection + install hint
|
||||
- [ ] Frontend shows clear install instructions
|
||||
- [ ] After installing CuPy in WSL + restart → GPU appears in list
|
||||
|
||||
---
|
||||
|
||||
## Task 3: Terrain Profile Click Fix (Priority 2 — 5 min)
|
||||
|
||||
### Problem
|
||||
Clicking "Terrain Profile" button in ruler measurement also adds a point on the map.
|
||||
|
||||
### Solution
|
||||
In the Terrain Profile button handler:
|
||||
|
||||
```tsx
|
||||
const handleTerrainProfile = (e: React.MouseEvent) => {
|
||||
e.stopPropagation();
|
||||
e.preventDefault();
|
||||
// ... open terrain profile
|
||||
};
|
||||
```
|
||||
|
||||
Also check if the button is rendered inside a map click handler area —
|
||||
may need `L.DomEvent.disableClickPropagation(container)` on the parent.
|
||||
|
||||
### Testing
|
||||
- [ ] Click "Terrain Profile" → opens profile, NO new ruler point added
|
||||
- [ ] Map click still works normally when not clicking the button
|
||||
|
||||
---
|
||||
|
||||
## Task 4: Coverage Boundary — Real Contour Shape (Priority 2 — 45 min)
|
||||
|
||||
### Problem
|
||||
Current boundary is a rough circle/ellipse. Should follow actual coverage contour.
|
||||
|
||||
### Approaches
|
||||
|
||||
**Option A: Shapely Alpha Shape (recommended)**
|
||||
|
||||
```python
|
||||
# backend/app/services/boundary_service.py
|
||||
|
||||
from shapely.geometry import MultiPoint
|
||||
from shapely.ops import unary_union
|
||||
import numpy as np
|
||||
|
||||
def calculate_coverage_boundary(points: list, threshold_dbm: float = -100) -> list:
|
||||
"""Calculate concave hull of coverage area above threshold."""
|
||||
|
||||
# Filter points above threshold
|
||||
valid = [(p['lon'], p['lat']) for p in points if p['rsrp'] >= threshold_dbm]
|
||||
|
||||
if len(valid) < 3:
|
||||
return []
|
||||
|
||||
mp = MultiPoint(valid)
|
||||
|
||||
# Use convex hull first, then try concave
|
||||
try:
|
||||
# Shapely 2.0+ has concave_hull
|
||||
from shapely import concave_hull
|
||||
hull = concave_hull(mp, ratio=0.3)
|
||||
except ImportError:
|
||||
# Fallback to convex hull
|
||||
hull = mp.convex_hull
|
||||
|
||||
# Simplify to reduce points (0.001 deg ≈ 100m)
|
||||
simplified = hull.simplify(0.001, preserve_topology=True)
|
||||
|
||||
# Extract coordinates
|
||||
if simplified.geom_type == 'Polygon':
|
||||
coords = list(simplified.exterior.coords)
|
||||
return [{'lat': c[1], 'lon': c[0]} for c in coords]
|
||||
|
||||
return []
|
||||
```
|
||||
|
||||
**Option B: Grid-based contour (simpler)**
|
||||
|
||||
```python
|
||||
def grid_contour_boundary(points: list, threshold_dbm: float, resolution: float):
|
||||
"""Find boundary by detecting edge cells in grid."""
|
||||
|
||||
# Create binary grid: 1 = above threshold, 0 = below
|
||||
# Find cells where 1 is adjacent to 0 = boundary
|
||||
# Convert cell coords back to lat/lon
|
||||
# Return ordered boundary points
|
||||
```
|
||||
|
||||
### API Endpoint
|
||||
|
||||
```python
|
||||
# Add to coverage calculation response
|
||||
@router.post("/coverage/calculate")
|
||||
async def calculate_coverage(...):
|
||||
result = coverage_service.calculate(...)
|
||||
|
||||
# Calculate boundary
|
||||
if result.points:
|
||||
boundary = calculate_coverage_boundary(
|
||||
result.points,
|
||||
threshold_dbm=settings.min_signal
|
||||
)
|
||||
result.boundary = boundary
|
||||
|
||||
return result
|
||||
```
|
||||
|
||||
### Frontend
|
||||
|
||||
```tsx
|
||||
// CoverageBoundary.tsx — use returned boundary coords
|
||||
// Instead of calculating alpha shape on frontend
|
||||
|
||||
const CoverageBoundary = ({ points, boundary }) => {
|
||||
// If server returned boundary, use it
|
||||
if (boundary && boundary.length > 0) {
|
||||
return <Polygon positions={boundary.map(p => [p.lat, p.lon])} />;
|
||||
}
|
||||
|
||||
// Fallback to current convex hull implementation
|
||||
return <CurrentImplementation points={points} />;
|
||||
};
|
||||
```
|
||||
|
||||
### Dependencies
|
||||
Need `shapely` installed:
|
||||
```
|
||||
pip install shapely # or pip3 install shapely --break-system-packages
|
||||
```
|
||||
|
||||
Check if already in requirements.txt.
|
||||
|
||||
### Testing
|
||||
- [ ] 5km calculation → boundary follows actual coverage shape
|
||||
- [ ] 10km calculation → boundary is irregular (terrain-dependent)
|
||||
- [ ] Toggle boundary on/off works
|
||||
- [ ] Boundary doesn't crash with < 3 points
|
||||
|
||||
---
|
||||
|
||||
## Task 5: Results Popup Enhancement (Priority 3 — 15 min)
|
||||
|
||||
### Problem
|
||||
Calculation complete toast/popup doesn't show which models were used.
|
||||
|
||||
### Solution
|
||||
Enhance the toast message after calculation:
|
||||
|
||||
```tsx
|
||||
// Current:
|
||||
toast.success(`Calculated ${points} points in ${time}s`);
|
||||
|
||||
// Enhanced:
|
||||
const modelCount = result.modelsUsed?.length ?? 0;
|
||||
const freq = sites[0]?.frequency ?? 0;
|
||||
const presetName = settings.preset ?? 'custom';
|
||||
|
||||
toast.success(
|
||||
`${points} pts • ${time}s • ${presetName} • ${freq} MHz • ${modelCount} models`,
|
||||
{ duration: 5000 }
|
||||
);
|
||||
```
|
||||
|
||||
### Testing
|
||||
- [ ] After calculation, toast shows: points, time, preset, frequency, model count
|
||||
|
||||
---
|
||||
|
||||
## Task 6: Native Windows Backend (Priority 3 — Research/Plan)
|
||||
|
||||
### Problem
|
||||
Current setup REQUIRES WSL2. Users without WSL2 can't use RFCP at all.
|
||||
|
||||
### Current Flow
|
||||
```
|
||||
RFCP.exe (Electron)
|
||||
→ detects WSL2
|
||||
→ launches: wsl python3 -m uvicorn ...
|
||||
→ backend runs in WSL2 Linux
|
||||
```
|
||||
|
||||
### Target Flow
|
||||
```
|
||||
RFCP.exe (Electron)
|
||||
→ Option A: embedded Python (Windows native)
|
||||
→ Option B: detect system Python (Windows)
|
||||
→ Option C: keep WSL2 but with fallback
|
||||
```
|
||||
|
||||
### Research Tasks (don't implement yet, just investigate)
|
||||
|
||||
1. **Check how Electron currently launches backend:**
|
||||
```bash
|
||||
# Look at desktop/ directory
|
||||
cat desktop/src/main.ts # or main.js
|
||||
# Find where it spawns python/uvicorn
|
||||
```
|
||||
|
||||
2. **Check if Windows Python works for backend:**
|
||||
```powershell
|
||||
# In Windows PowerShell:
|
||||
cd D:\root\rfcp\backend
|
||||
python -m uvicorn app.main:app --host 0.0.0.0 --port 8090
|
||||
# Does it start? What errors?
|
||||
```
|
||||
|
||||
3. **Evaluate embedded Python options:**
|
||||
- python-embedded (official, ~30 MB)
|
||||
- PyInstaller (bundle backend as .exe)
|
||||
- cx_Freeze
|
||||
- Nuitka (compile Python to C)
|
||||
|
||||
4. **Document findings** — create a brief report:
|
||||
```
|
||||
RFCP-Native-Backend-Research.md
|
||||
- Current architecture (WSL2 dependency)
|
||||
- Windows Python compatibility test results
|
||||
- Recommended approach
|
||||
- Migration steps
|
||||
- Timeline estimate
|
||||
```
|
||||
|
||||
### Goal
|
||||
User downloads RFCP.exe → installs → clicks icon → everything works.
|
||||
No WSL2. No manual pip install. GPU auto-detected.
|
||||
|
||||
---
|
||||
|
||||
## Implementation Order
|
||||
|
||||
### Priority 1 (30 min total)
|
||||
1. **Task 1:** Fix GPU dropdown — make it clickable again
|
||||
2. **Task 2:** GPU diagnostics + install instructions in UI
|
||||
3. **Task 3:** Terrain Profile click propagation fix
|
||||
|
||||
### Priority 2 (1 hour)
|
||||
4. **Task 4:** Coverage boundary real contour (shapely)
|
||||
5. **Task 5:** Results popup enhancement
|
||||
|
||||
### Priority 3 (Research only)
|
||||
6. **Task 6:** Investigate native Windows backend — report only, no implementation
|
||||
|
||||
---
|
||||
|
||||
## Build & Deploy
|
||||
|
||||
```bash
|
||||
# After implementation:
|
||||
cd /mnt/d/root/rfcp/frontend
|
||||
npx tsc --noEmit # TypeScript check
|
||||
npm run build # Production build
|
||||
|
||||
# Rebuild Electron:
|
||||
cd /mnt/d/root/rfcp/installer
|
||||
bash build-win.sh
|
||||
|
||||
# Test:
|
||||
# Install new .exe and verify GPU indicator works
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Success Criteria
|
||||
|
||||
- [ ] GPU dropdown opens when clicking badge
|
||||
- [ ] Dropdown shows device list or install instructions
|
||||
- [ ] After `pip3 install cupy-cuda12x --break-system-packages` in WSL + restart → GPU visible
|
||||
- [ ] Terrain Profile click doesn't add ruler points
|
||||
- [ ] Coverage boundary follows actual signal contour
|
||||
- [ ] Results toast shows model count and frequency
|
||||
- [ ] Native Windows backend research document created
|
||||
@@ -0,0 +1,556 @@
|
||||
# RFCP — Iteration 3.6.0: Production GPU Build
|
||||
|
||||
## Overview
|
||||
|
||||
Enable GPU acceleration in the production PyInstaller build. Currently production
|
||||
runs CPU-only (NumPy) because CuPy is not included in rfcp-server.exe.
|
||||
|
||||
**Goal:** User with NVIDIA GPU installs RFCP → GPU detected automatically →
|
||||
coverage calculations use CUDA acceleration. No manual pip install required.
|
||||
|
||||
**Context from diagnostics screenshot:**
|
||||
```json
|
||||
{
|
||||
"python_executable": "C:\\Users\\Administrator\\AppData\\Local\\Programs\\RFCP\\resources\\backend\\rfcp-server.exe",
|
||||
"platform": "Windows-10-10.0.26288-SP0",
|
||||
"is_wsl": false,
|
||||
"numpy": { "version": "1.26.4" },
|
||||
"cuda": {
|
||||
"error": "CuPy not installed",
|
||||
"install_hint": "pip install cupy-cuda12x"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**Architecture:** Production uses PyInstaller-bundled rfcp-server.exe (self-contained).
|
||||
CuPy not included → GPU not available for end users.
|
||||
|
||||
---
|
||||
|
||||
## Strategy: Two-Tier Build
|
||||
|
||||
Instead of one massive binary, produce two builds:
|
||||
|
||||
```
|
||||
RFCP-Setup-{version}.exe (~150 MB) — CPU-only, works everywhere
|
||||
RFCP-Setup-{version}-GPU.exe (~700 MB) — includes CuPy + CUDA runtime
|
||||
```
|
||||
|
||||
**Why not dynamic loading?**
|
||||
PyInstaller bundles everything at build time. CuPy can't be pip-installed
|
||||
into a frozen exe at runtime. Options are:
|
||||
|
||||
1. **Bundle CuPy in PyInstaller** ← cleanest, what we'll do
|
||||
2. Side-load CuPy DLLs (fragile, version-sensitive)
|
||||
3. Hybrid: unfrozen Python + CuPy installed separately (defeats purpose of exe)
|
||||
|
||||
---
|
||||
|
||||
## Task 1: PyInstaller Spec with CuPy (Priority 1 — 30 min)
|
||||
|
||||
### File: `installer/rfcp-server-gpu.spec`
|
||||
|
||||
Create a separate .spec file that includes CuPy:
|
||||
|
||||
```python
|
||||
# rfcp-server-gpu.spec — GPU-enabled build
|
||||
import os
|
||||
import sys
|
||||
from PyInstaller.utils.hooks import collect_all, collect_dynamic_libs
|
||||
|
||||
backend_path = os.path.abspath(os.path.join(os.path.dirname(SPEC), '..', 'backend'))
|
||||
|
||||
# Collect CuPy and its CUDA dependencies
|
||||
cupy_datas, cupy_binaries, cupy_hiddenimports = collect_all('cupy')
|
||||
# Also collect cupy_backends
|
||||
cupyb_datas, cupyb_binaries, cupyb_hiddenimports = collect_all('cupy_backends')
|
||||
|
||||
# CUDA runtime libraries that CuPy needs
|
||||
cuda_binaries = collect_dynamic_libs('cupy')
|
||||
|
||||
a = Analysis(
|
||||
[os.path.join(backend_path, 'run_server.py')],
|
||||
pathex=[backend_path],
|
||||
binaries=cupy_binaries + cupyb_binaries + cuda_binaries,
|
||||
datas=[
|
||||
(os.path.join(backend_path, 'data', 'terrain'), 'data/terrain'),
|
||||
] + cupy_datas + cupyb_datas,
|
||||
hiddenimports=[
|
||||
# Existing imports from rfcp-server.spec
|
||||
'uvicorn.logging',
|
||||
'uvicorn.loops',
|
||||
'uvicorn.loops.auto',
|
||||
'uvicorn.protocols',
|
||||
'uvicorn.protocols.http',
|
||||
'uvicorn.protocols.http.auto',
|
||||
'uvicorn.protocols.websockets',
|
||||
'uvicorn.protocols.websockets.auto',
|
||||
'uvicorn.lifespan',
|
||||
'uvicorn.lifespan.on',
|
||||
'motor',
|
||||
'pymongo',
|
||||
'numpy',
|
||||
'scipy',
|
||||
'shapely',
|
||||
'shapely.geometry',
|
||||
'shapely.ops',
|
||||
# CuPy-specific
|
||||
'cupy',
|
||||
'cupy.cuda',
|
||||
'cupy.cuda.runtime',
|
||||
'cupy.cuda.driver',
|
||||
'cupy.cuda.memory',
|
||||
'cupy.cuda.stream',
|
||||
'cupy._core',
|
||||
'cupy._core.core',
|
||||
'cupy._core._routines_math',
|
||||
'cupy.fft',
|
||||
'cupy.linalg',
|
||||
'fastrlock',
|
||||
] + cupy_hiddenimports + cupyb_hiddenimports,
|
||||
hookspath=[],
|
||||
hooksconfig={},
|
||||
runtime_hooks=[],
|
||||
excludes=[],
|
||||
noarchive=False,
|
||||
)
|
||||
|
||||
pyz = PYZ(a.pure)
|
||||
|
||||
exe = EXE(
|
||||
pyz,
|
||||
a.scripts,
|
||||
a.binaries,
|
||||
a.datas,
|
||||
[],
|
||||
name='rfcp-server',
|
||||
debug=False,
|
||||
bootloader_ignore_signals=False,
|
||||
strip=False,
|
||||
upx=False, # Don't compress CUDA libs — they need fast loading
|
||||
console=True,
|
||||
icon=os.path.join(os.path.dirname(SPEC), 'rfcp.ico'),
|
||||
)
|
||||
```
|
||||
|
||||
### Key Points:
|
||||
- `collect_all('cupy')` grabs all CuPy submodules + CUDA DLLs
|
||||
- `fastrlock` is a CuPy dependency (must be in hiddenimports)
|
||||
- `upx=False` — don't compress CUDA binaries (breaks them)
|
||||
- One-file mode (`a.binaries + a.datas` in EXE) for single exe
|
||||
|
||||
---
|
||||
|
||||
## Task 2: Build Script for GPU Variant (Priority 1 — 15 min)
|
||||
|
||||
### File: `installer/build-gpu.bat` (Windows)
|
||||
|
||||
```batch
|
||||
@echo off
|
||||
echo ========================================
|
||||
echo RFCP GPU Build — rfcp-server-gpu.exe
|
||||
echo ========================================
|
||||
|
||||
REM Ensure CuPy is installed in build environment
|
||||
echo Checking CuPy installation...
|
||||
python -c "import cupy; print(f'CuPy {cupy.__version__} with CUDA {cupy.cuda.runtime.runtimeGetVersion()}')"
|
||||
if errorlevel 1 (
|
||||
echo ERROR: CuPy not installed. Run: pip install cupy-cuda12x
|
||||
exit /b 1
|
||||
)
|
||||
|
||||
REM Build with GPU spec
|
||||
echo Building rfcp-server with GPU support...
|
||||
cd /d %~dp0\..\backend
|
||||
pyinstaller ..\installer\rfcp-server-gpu.spec --clean --noconfirm
|
||||
|
||||
echo.
|
||||
echo Build complete! Output: dist\rfcp-server.exe
|
||||
echo Size:
|
||||
dir dist\rfcp-server.exe
|
||||
|
||||
REM Optional: copy to Electron resources
|
||||
if exist "..\desktop\resources" (
|
||||
copy /y dist\rfcp-server.exe ..\desktop\resources\rfcp-server.exe
|
||||
echo Copied to desktop\resources\
|
||||
)
|
||||
|
||||
pause
|
||||
```
|
||||
|
||||
### File: `installer/build-gpu.sh` (WSL/Linux)
|
||||
|
||||
```bash
|
||||
#!/bin/bash
|
||||
set -e
|
||||
|
||||
echo "========================================"
|
||||
echo " RFCP GPU Build — rfcp-server (GPU)"
|
||||
echo "========================================"
|
||||
|
||||
# Check CuPy
|
||||
python3 -c "import cupy; print(f'CuPy {cupy.__version__}')" 2>/dev/null || {
|
||||
echo "ERROR: CuPy not installed. Run: pip install cupy-cuda12x"
|
||||
exit 1
|
||||
}
|
||||
|
||||
cd "$(dirname "$0")/../backend"
|
||||
pyinstaller ../installer/rfcp-server-gpu.spec --clean --noconfirm
|
||||
|
||||
echo ""
|
||||
echo "Build complete!"
|
||||
ls -lh dist/rfcp-server*
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Task 3: GPU Backend — Graceful CuPy Detection (Priority 1 — 15 min)
|
||||
|
||||
### File: `backend/app/services/gpu_backend.py`
|
||||
|
||||
The existing gpu_backend.py should already handle CuPy absence gracefully.
|
||||
Verify and fix if needed:
|
||||
|
||||
```python
|
||||
# gpu_backend.py — must work in BOTH CPU and GPU builds
|
||||
|
||||
import numpy as np
|
||||
|
||||
# Try importing CuPy — this is the key detection
|
||||
_cupy_available = False
|
||||
_gpu_device_name = None
|
||||
_gpu_memory_mb = 0
|
||||
|
||||
try:
|
||||
import cupy as cp
|
||||
# Verify we can actually use it (not just import)
|
||||
device = cp.cuda.Device(0)
|
||||
_gpu_device_name = device.attributes.get('name', f'CUDA Device {device.id}')
|
||||
# Try to get name via runtime
|
||||
try:
|
||||
props = cp.cuda.runtime.getDeviceProperties(0)
|
||||
_gpu_device_name = props.get('name', _gpu_device_name)
|
||||
if isinstance(_gpu_device_name, bytes):
|
||||
_gpu_device_name = _gpu_device_name.decode('utf-8').strip('\x00')
|
||||
except Exception:
|
||||
pass
|
||||
_gpu_memory_mb = device.mem_info[1] // (1024 * 1024)
|
||||
_cupy_available = True
|
||||
except ImportError:
|
||||
cp = None # CuPy not installed (CPU build)
|
||||
except Exception as e:
|
||||
cp = None # CuPy installed but CUDA not available
|
||||
print(f"[GPU] CuPy found but CUDA unavailable: {e}")
|
||||
|
||||
|
||||
def is_gpu_available() -> bool:
|
||||
return _cupy_available
|
||||
|
||||
def get_gpu_info() -> dict:
|
||||
if _cupy_available:
|
||||
return {
|
||||
"available": True,
|
||||
"backend": "CuPy (CUDA)",
|
||||
"device": _gpu_device_name,
|
||||
"memory_mb": _gpu_memory_mb,
|
||||
}
|
||||
return {
|
||||
"available": False,
|
||||
"backend": "NumPy (CPU)",
|
||||
"device": "CPU",
|
||||
"memory_mb": 0,
|
||||
}
|
||||
|
||||
def get_array_module():
|
||||
"""Return cupy if available, otherwise numpy."""
|
||||
if _cupy_available:
|
||||
return cp
|
||||
return np
|
||||
```
|
||||
|
||||
### Usage in coverage_service.py:
|
||||
|
||||
```python
|
||||
from app.services.gpu_backend import get_array_module, is_gpu_available
|
||||
|
||||
xp = get_array_module() # cupy or numpy — same API
|
||||
|
||||
# All calculations use xp instead of np:
|
||||
distances = xp.sqrt(dx**2 + dy**2)
|
||||
path_loss = 20 * xp.log10(distances) + 20 * xp.log10(freq_mhz) - 27.55
|
||||
|
||||
# If using cupy, results need to come back to CPU for JSON serialization:
|
||||
if is_gpu_available():
|
||||
results = xp.asnumpy(path_loss)
|
||||
else:
|
||||
results = path_loss
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Task 4: GPU Status in Frontend Header (Priority 2 — 10 min)
|
||||
|
||||
### Update GPUIndicator.tsx
|
||||
|
||||
When GPU is detected, the badge should clearly show it:
|
||||
|
||||
```
|
||||
CPU build: [⚙ CPU] (gray badge)
|
||||
GPU detected: [⚡ RTX 4060] (green badge)
|
||||
```
|
||||
|
||||
The existing GPUIndicator already does this. Just verify:
|
||||
1. Badge color changes from gray → green when GPU available
|
||||
2. Dropdown shows "Active: GPU (CUDA)" not just "CPU (NumPy)"
|
||||
3. No install hints shown when CuPy IS available
|
||||
|
||||
---
|
||||
|
||||
## Task 5: Build Environment Setup (Priority 1 — Manual by Олег)
|
||||
|
||||
### Prerequisites for GPU build:
|
||||
|
||||
```powershell
|
||||
# 1. Install CuPy in Windows Python (NOT WSL)
|
||||
pip install cupy-cuda12x
|
||||
|
||||
# 2. Verify CuPy works
|
||||
python -c "import cupy; print(cupy.cuda.runtime.runtimeGetVersion())"
|
||||
# Should print: 12000 or similar
|
||||
|
||||
# 3. Install PyInstaller if not present
|
||||
pip install pyinstaller
|
||||
|
||||
# 4. Verify fastrlock (CuPy dependency)
|
||||
pip install fastrlock
|
||||
```
|
||||
|
||||
### Build commands:
|
||||
|
||||
```powershell
|
||||
# CPU-only build (existing)
|
||||
cd D:\root\rfcp\backend
|
||||
pyinstaller ..\installer\rfcp-server.spec --clean --noconfirm
|
||||
|
||||
# GPU build (new)
|
||||
cd D:\root\rfcp\backend
|
||||
pyinstaller ..\installer\rfcp-server-gpu.spec --clean --noconfirm
|
||||
```
|
||||
|
||||
### Expected output sizes:
|
||||
```
|
||||
rfcp-server.exe (CPU): ~80 MB
|
||||
rfcp-server.exe (GPU): ~600-800 MB (CuPy bundles CUDA runtime libs)
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Task 6: Electron — Detect Build Variant (Priority 2 — 10 min)
|
||||
|
||||
### File: `desktop/main.js` or `desktop/src/main.ts`
|
||||
|
||||
Add version detection so UI knows which build it's running:
|
||||
|
||||
```javascript
|
||||
// After backend starts, check GPU status
|
||||
async function checkBackendCapabilities() {
|
||||
try {
|
||||
const response = await fetch('http://127.0.0.1:8090/api/gpu/status');
|
||||
const data = await response.json();
|
||||
|
||||
// Send to renderer
|
||||
mainWindow.webContents.send('gpu-status', data);
|
||||
|
||||
if (data.available) {
|
||||
console.log(`[RFCP] GPU: ${data.device} (${data.memory_mb} MB)`);
|
||||
} else {
|
||||
console.log('[RFCP] Running in CPU mode');
|
||||
}
|
||||
} catch (e) {
|
||||
console.log('[RFCP] Backend not ready for GPU check');
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Task 7: About / Version Info (Priority 3 — 5 min)
|
||||
|
||||
### Add build info to `/api/health` response:
|
||||
|
||||
```python
|
||||
@app.get("/api/health")
|
||||
async def health():
|
||||
gpu_info = get_gpu_info()
|
||||
return {
|
||||
"status": "ok",
|
||||
"version": "3.6.0",
|
||||
"build": "gpu" if gpu_info["available"] else "cpu",
|
||||
"gpu": gpu_info,
|
||||
"python": sys.version,
|
||||
"platform": platform.platform(),
|
||||
}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Build & Test Procedure
|
||||
|
||||
### Step 1: Setup Build Environment
|
||||
```powershell
|
||||
# Windows PowerShell (NOT WSL)
|
||||
cd D:\root\rfcp
|
||||
|
||||
# Verify Python environment
|
||||
python --version # Should be 3.11.x
|
||||
pip list | findstr cupy # Should show cupy-cuda12x
|
||||
|
||||
# If CuPy not installed:
|
||||
pip install cupy-cuda12x fastrlock
|
||||
```
|
||||
|
||||
### Step 2: Build GPU Variant
|
||||
```powershell
|
||||
cd D:\root\rfcp\backend
|
||||
pyinstaller ..\installer\rfcp-server-gpu.spec --clean --noconfirm
|
||||
```
|
||||
|
||||
### Step 3: Test Standalone
|
||||
```powershell
|
||||
# Run the built exe directly
|
||||
.\dist\rfcp-server.exe
|
||||
|
||||
# In another terminal:
|
||||
curl http://localhost:8090/api/health
|
||||
curl http://localhost:8090/api/gpu/status
|
||||
curl http://localhost:8090/api/gpu/diagnostics
|
||||
```
|
||||
|
||||
### Step 4: Verify GPU Detection
|
||||
Expected `/api/gpu/status` response:
|
||||
```json
|
||||
{
|
||||
"available": true,
|
||||
"backend": "CuPy (CUDA)",
|
||||
"device": "NVIDIA GeForce RTX 4060 Laptop GPU",
|
||||
"memory_mb": 8188
|
||||
}
|
||||
```
|
||||
|
||||
### Step 5: Run Coverage Calculation
|
||||
- Place a site on map
|
||||
- Calculate coverage (10km, 200m resolution)
|
||||
- Check logs for: `[GPU] Using CUDA: RTX 4060 (8188 MB)`
|
||||
- Compare performance: should be 5-10x faster than CPU
|
||||
|
||||
### Step 6: Full Electron Build
|
||||
```powershell
|
||||
# Copy GPU server to Electron resources
|
||||
copy backend\dist\rfcp-server.exe desktop\resources\
|
||||
|
||||
# Build Electron installer
|
||||
cd installer
|
||||
.\build-win.sh # or equivalent Windows script
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Risk Assessment
|
||||
|
||||
### Size Concern
|
||||
CuPy bundles CUDA runtime (~500MB). Total GPU installer ~700-800MB.
|
||||
**Mitigation:** This is acceptable for a professional RF planning tool.
|
||||
AutoCAD is 7GB. QGIS is 1.5GB. Atoll is 3GB+.
|
||||
|
||||
### CUDA Version Compatibility
|
||||
CuPy-cuda12x requires CUDA 12.x compatible driver.
|
||||
RTX 4060 with Driver 581.42 → CUDA 13.0 → backward compatible ✅
|
||||
**Mitigation:** gpu_backend.py already falls back to NumPy gracefully.
|
||||
|
||||
### PyInstaller + CuPy Issues
|
||||
Known issues:
|
||||
- CuPy uses many .so/.dll files that PyInstaller might miss
|
||||
- `collect_all('cupy')` should catch them, but test thoroughly
|
||||
- If missing DLLs → add them manually to `binaries` list
|
||||
|
||||
**Mitigation:** Test the standalone exe on a clean machine (no Python installed).
|
||||
|
||||
### Antivirus False Positives
|
||||
Larger exe = more AV suspicion. PyInstaller exes already trigger some AV.
|
||||
**Mitigation:** Code-sign the exe (future task), submit to AV vendors for whitelisting.
|
||||
|
||||
---
|
||||
|
||||
## Success Criteria
|
||||
|
||||
- [ ] `rfcp-server-gpu.spec` created and builds successfully
|
||||
- [ ] Built exe detects RTX 4060 on startup
|
||||
- [ ] `/api/gpu/status` returns `"available": true`
|
||||
- [ ] Coverage calculation uses CuPy (check logs)
|
||||
- [ ] GPU badge shows "⚡ RTX 4060" (green) in header
|
||||
- [ ] Fallback to NumPy works if CUDA unavailable
|
||||
- [ ] CPU-only spec (`rfcp-server.spec`) still builds and works
|
||||
- [ ] Build time < 10 minutes
|
||||
- [ ] GPU exe size < 1 GB
|
||||
|
||||
---
|
||||
|
||||
## Commit Message
|
||||
|
||||
```
|
||||
feat(build): add GPU-enabled PyInstaller build with CuPy + CUDA
|
||||
|
||||
- New rfcp-server-gpu.spec with CuPy/CUDA collection
|
||||
- Build scripts: build-gpu.bat, build-gpu.sh
|
||||
- Graceful GPU detection in gpu_backend.py
|
||||
- Two-tier build: CPU (~80MB) and GPU (~700MB) variants
|
||||
- Auto-detection: RTX 4060 → CuPy acceleration
|
||||
- Fallback: no CUDA → NumPy (CPU mode)
|
||||
|
||||
Iteration 3.6.0 — Production GPU Build
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Files Summary
|
||||
|
||||
### New Files:
|
||||
| File | Purpose |
|
||||
|------|---------|
|
||||
| `installer/rfcp-server-gpu.spec` | PyInstaller config with CuPy |
|
||||
| `installer/build-gpu.bat` | Windows GPU build script |
|
||||
| `installer/build-gpu.sh` | Linux/WSL GPU build script |
|
||||
|
||||
### Modified Files:
|
||||
| File | Changes |
|
||||
|------|---------|
|
||||
| `backend/app/services/gpu_backend.py` | Verify graceful detection |
|
||||
| `backend/app/main.py` | Health endpoint with build info |
|
||||
| `desktop/main.js` or `main.ts` | GPU status check after backend start |
|
||||
| `frontend/src/components/ui/GPUIndicator.tsx` | Verify badge shows GPU |
|
||||
|
||||
### No Changes Needed:
|
||||
| File | Reason |
|
||||
|------|--------|
|
||||
| `installer/rfcp-server.spec` | CPU build stays as-is |
|
||||
| `backend/app/services/coverage_service.py` | Already uses get_array_module() |
|
||||
| `installer/build-win.sh` | Existing CPU build unchanged |
|
||||
|
||||
---
|
||||
|
||||
## Timeline
|
||||
|
||||
| Phase | Task | Time |
|
||||
|-------|------|------|
|
||||
| **P1** | Create rfcp-server-gpu.spec | 30 min |
|
||||
| **P1** | Build scripts | 15 min |
|
||||
| **P1** | Verify gpu_backend.py | 15 min |
|
||||
| **P2** | Frontend badge verification | 10 min |
|
||||
| **P2** | Electron GPU status | 10 min |
|
||||
| **P3** | Health endpoint update | 5 min |
|
||||
| **Test** | Build + test standalone | 20 min |
|
||||
| **Test** | Full Electron build | 15 min |
|
||||
| | **Total** | **~2 hours** |
|
||||
|
||||
**Claude Code estimated time: 10-15 min** (spec + scripts + backend changes)
|
||||
**Manual testing by Олег: 30-45 min** (building + verifying)
|
||||
220
docs/devlog/gpu_supp/RFCP-Roadmap-Updated-2026-02-04.md
Normal file
220
docs/devlog/gpu_supp/RFCP-Roadmap-Updated-2026-02-04.md
Normal file
@@ -0,0 +1,220 @@
|
||||
# RFCP Project Roadmap — Updated February 4, 2026
|
||||
|
||||
**Project:** RFCP (RF Coverage Planning) for UMTC
|
||||
**Developer:** Олег + Claude
|
||||
**Started:** January 30, 2025
|
||||
**Current Version:** 3.8.0 (GPU Acceleration Complete)
|
||||
|
||||
---
|
||||
|
||||
## ✅ Completed Milestones
|
||||
|
||||
### Phase 1: Frontend (January 2025)
|
||||
- ✅ React + TypeScript + Vite + Leaflet
|
||||
- ✅ Multi-site RF coverage planning
|
||||
- ✅ Multi-sector sites (Alpha/Beta/Gamma)
|
||||
- ✅ Geographic-scale canvas heatmap
|
||||
- ✅ Keyboard shortcuts + delete confirmation
|
||||
- ✅ NumberInput components with sliders
|
||||
- ✅ TypeScript strict mode, ESLint clean
|
||||
- ✅ Production build: 536KB / 163KB gzipped
|
||||
|
||||
### Phase 2: Backend Architecture (February 1, 2025)
|
||||
- ✅ Python FastAPI + NumPy + ProcessPoolExecutor
|
||||
- ✅ 8 propagation models (FreeSpace, Okumura-Hata, COST-231, ITU-R P.1546, etc.)
|
||||
- ✅ Modular geometry engine (haversine, intersection, reflection, diffraction, LOS)
|
||||
- ✅ SharedMemoryManager for terrain data (zero-copy, 25 MB)
|
||||
- ✅ Building filtering (351k → 27k bbox → 15k cap)
|
||||
- ✅ Overpass API with retry + mirror failover
|
||||
- ✅ WebSocket progress streaming
|
||||
|
||||
### Phase 3: Performance (February 2-3, 2025)
|
||||
- ✅ LOD (Level of Detail) optimization
|
||||
- ✅ Spatial indexing for buildings (R-tree)
|
||||
- ✅ Dominant path simplification for distant points
|
||||
- ✅ OOM fix + memory management
|
||||
- ✅ CloudRF-style color gradient
|
||||
- ✅ Results popup + session history
|
||||
- ✅ Terrain profile viewer
|
||||
|
||||
### Phase 4: GPU Acceleration (February 3-4, 2025) ⭐
|
||||
- ✅ CuPy + CUDA backend (RTX 4060)
|
||||
- ✅ CUDA Toolkit 13.1 + cupy-cuda13x setup
|
||||
- ✅ Phase 2.5: Vectorized distances + path_loss (0.006s)
|
||||
- ✅ Phase 2.6: Vectorized terrain LOS + diffraction (0.04s)
|
||||
- ✅ Phase 2.7: Vectorized antenna pattern loss
|
||||
- ✅ Vegetation bbox pre-filter (100x+ speedup)
|
||||
- ✅ Worker process isolation (no CUDA in workers)
|
||||
- ✅ PyInstaller ONEDIR GPU build (1.2 GB installer)
|
||||
- ✅ **Full preset: 195s → 11.2s (17.4x speedup)**
|
||||
|
||||
### Supporting Work
|
||||
- ✅ RF Radio Theory wiki article (comprehensive)
|
||||
- ✅ Propagation model research (CloudRF, SPLAT!, Signal Server)
|
||||
- ✅ RFCP Method collaboration framework documented
|
||||
|
||||
---
|
||||
|
||||
## 📊 Current Performance
|
||||
|
||||
| Preset | Points | Resolution | Time (cached) | Time (cold) |
|
||||
|--------|--------|-----------|---------------|-------------|
|
||||
| Standard | 1,975 | 200m | **2.3s** | ~12s |
|
||||
| Full | 6,640 | 50m | **11.2s** | ~20s |
|
||||
| 50km radius | 4,966 | adaptive | ~410s | ~420s |
|
||||
|
||||
**Hardware:** Windows 11, RTX 4060 Laptop GPU, 6-core CPU
|
||||
|
||||
---
|
||||
|
||||
## 🔜 Next: Phase 5 — Data & Accuracy
|
||||
|
||||
### 5.1 SRTM Terrain Integration
|
||||
**Priority:** HIGH
|
||||
**Status:** Not started
|
||||
|
||||
Current terrain: Single HGT tile download per calculation
|
||||
Target: Pre-cached SRTM/ASTER DEM tiles with proper interpolation
|
||||
|
||||
- [ ] SRTM tile manager (auto-download, cache)
|
||||
- [ ] Bilinear interpolation for elevation sampling
|
||||
- [ ] Multi-tile coverage for large radius
|
||||
- [ ] Terrain profile accuracy validation
|
||||
- [ ] Compare with current terrain data quality
|
||||
|
||||
### 5.2 Project Persistence
|
||||
**Priority:** MEDIUM
|
||||
|
||||
- [ ] Save/load projects (JSON or SQLite)
|
||||
- [ ] Site configurations persistence
|
||||
- [ ] Coverage results caching
|
||||
- [ ] Session history persistence across restarts
|
||||
- [ ] Export coverage report (PDF/PNG)
|
||||
|
||||
### 5.3 Accuracy Validation
|
||||
**Priority:** MEDIUM
|
||||
|
||||
- [ ] Compare with known coverage maps
|
||||
- [ ] Field measurements with real equipment
|
||||
- [ ] Calibrate propagation models per environment
|
||||
- [ ] Antenna pattern library (real equipment specs)
|
||||
|
||||
---
|
||||
|
||||
## 🔮 Future Phases
|
||||
|
||||
### Phase 6: Multi-Station & Dashboard
|
||||
- [ ] Multi-station view (aggregate coverage)
|
||||
- [ ] Station discovery via WireGuard mesh
|
||||
- [ ] Coverage gap analysis
|
||||
- [ ] Interference modeling between stations
|
||||
- [ ] Handover zone visualization
|
||||
|
||||
### Phase 7: Hardware Integration
|
||||
- [ ] LimeSDR Mini 2.0 testing
|
||||
- [ ] Real RF attach validation
|
||||
- [ ] sysmoISIM-SJA2 SIM integration
|
||||
- [ ] ZTE B8200 base station testing
|
||||
- [ ] INFOZAHYST Plastun SDR (if accessible)
|
||||
|
||||
### Phase 8: Advanced Features
|
||||
- [ ] 3D visualization mode
|
||||
- [ ] Link budget analysis view
|
||||
- [ ] Frequency planning tool
|
||||
- [ ] Indoor coverage modeling
|
||||
- [ ] Time-series analysis (seasonal vegetation)
|
||||
- [ ] Offline mode (embedded terrain DB)
|
||||
|
||||
### Phase 9: Distribution
|
||||
- [ ] Auto-updater (electron-updater)
|
||||
- [ ] Live USB distribution for field deployment
|
||||
- [ ] Standalone offline package
|
||||
- [ ] User documentation / help system
|
||||
|
||||
---
|
||||
|
||||
## 🏛️ Architecture Overview
|
||||
|
||||
```
|
||||
RFCP Application (Electron)
|
||||
├── Frontend (React + TypeScript + Vite)
|
||||
│ ├── Leaflet map with custom canvas heatmap
|
||||
│ ├── Zustand state management
|
||||
│ └── WebSocket for progress streaming
|
||||
│
|
||||
├── Backend (Python FastAPI)
|
||||
│ ├── Coverage Engine
|
||||
│ │ ├── Grid generator (adaptive zones)
|
||||
│ │ ├── GPU pipeline (CuPy/CUDA) — main process
|
||||
│ │ │ ├── Phase 2.5: distances + path_loss
|
||||
│ │ │ ├── Phase 2.6: terrain LOS + diffraction
|
||||
│ │ │ └── Phase 2.7: antenna pattern
|
||||
│ │ └── CPU workers (ProcessPool) — 3-6 workers
|
||||
│ │ ├── Building obstruction (spatial index)
|
||||
│ │ ├── Reflections (ray-building intersection)
|
||||
│ │ └── Vegetation loss (bbox pre-filter)
|
||||
│ │
|
||||
│ ├── Propagation Models (8 models)
|
||||
│ │ ├── Free-Space Path Loss
|
||||
│ │ ├── Okumura-Hata (150-1500 MHz)
|
||||
│ │ ├── COST-231-Hata (1500-2000 MHz)
|
||||
│ │ ├── ITU-R P.1546
|
||||
│ │ └── ... 4 more
|
||||
│ │
|
||||
│ ├── OSM Services
|
||||
│ │ ├── Buildings (Overpass API + cache)
|
||||
│ │ ├── Vegetation (bbox pre-filter)
|
||||
│ │ ├── Water bodies
|
||||
│ │ └── Streets
|
||||
│ │
|
||||
│ └── Terrain Service
|
||||
│ ├── HGT tile download + cache
|
||||
│ ├── Elevation sampling
|
||||
│ └── Line-of-sight checking
|
||||
│
|
||||
└── Desktop (Electron)
|
||||
├── Backend process management
|
||||
└── NSIS installer (1.2 GB with CUDA)
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 📈 Development Timeline
|
||||
|
||||
```
|
||||
Jan 30, 2025 Phase 1: Frontend complete (10 iterations)
|
||||
Feb 01, 2025 Phase 2: Backend architecture (48 files, 82 tests)
|
||||
Feb 02, 2025 Phase 3: LOD + performance optimization
|
||||
Feb 03, 2025 Phase 3.5-3.6: GPU setup + CUDA build
|
||||
Feb 04, 2025 Phase 3.7-3.8: GPU vectorization complete ⭐
|
||||
─────────────────────────────────────────
|
||||
Full preset: 195s → 11.2s (17.4x speedup)
|
||||
Standard: 38s → 2.3s (16.5x speedup)
|
||||
```
|
||||
|
||||
**Total development time:** ~5 days intensive
|
||||
**Total iterations:** 3.8.0 (20+ sub-iterations)
|
||||
**Architecture:** Battle-tested, production-ready
|
||||
|
||||
---
|
||||
|
||||
## 🧰 Tech Stack
|
||||
|
||||
| Component | Technology | Version |
|
||||
|-----------|-----------|---------|
|
||||
| Frontend | React + TypeScript | 18 |
|
||||
| Build | Vite | 5.x |
|
||||
| Map | Leaflet | 1.9 |
|
||||
| State | Zustand | 4.x |
|
||||
| Backend | Python FastAPI | 3.12 |
|
||||
| GPU | CuPy + CUDA | 13.x |
|
||||
| Parallel | ProcessPoolExecutor | stdlib |
|
||||
| Terrain | NumPy (HGT tiles) | 1.26 |
|
||||
| Desktop | Electron | 28.x |
|
||||
| Installer | NSIS (via electron-builder) | - |
|
||||
| Build (BE) | PyInstaller | 6.x |
|
||||
|
||||
---
|
||||
|
||||
*"11.2 seconds. Full preset. 6,640 points. GPU acceleration complete."*
|
||||
*— February 4, 2026*
|
||||
345
docs/devlog/gpu_supp/RFCP-WebGL-Radial-Gradients-Task.md
Normal file
345
docs/devlog/gpu_supp/RFCP-WebGL-Radial-Gradients-Task.md
Normal file
@@ -0,0 +1,345 @@
|
||||
# RFCP: WebGL Radial Gradients Coverage Layer
|
||||
|
||||
## Мета
|
||||
|
||||
Переробити WebGL coverage layer з texture-based підходу на **radial gradients** — як працює Canvas GeographicHeatmap, але на GPU.
|
||||
|
||||
## Чому radial gradients краще для візуалізації
|
||||
|
||||
**Texture-based (поточний):**
|
||||
- Кожна точка = 1 pixel в grid
|
||||
- Nearest neighbor fill → blocky квадрати
|
||||
- Навіть з smoothstep — видно grid структуру
|
||||
- ✅ Добре для: terrain detail, точні значення
|
||||
- ❌ Погано для: красива візуалізація
|
||||
|
||||
**Radial gradients (Canvas heatmap):**
|
||||
- Кожна точка = круг з radial falloff
|
||||
- Smooth blending між точками
|
||||
- Природній вигляд coverage
|
||||
- ✅ Добре для: красива візуалізація, презентації
|
||||
- ❌ Погано для: точні значення (blending спотворює)
|
||||
|
||||
## Архітектура WebGL Radial Gradients
|
||||
|
||||
### Підхід: Multi-pass additive blending
|
||||
|
||||
```
|
||||
Pass 1-N: Для кожної точки (або batch точок)
|
||||
├── Малюємо full-screen quad
|
||||
├── Fragment shader: radial falloff від центру точки
|
||||
├── Output: (weight * value, weight, 0, 1)
|
||||
└── Blending: GL_ONE, GL_ONE (additive)
|
||||
|
||||
Final Pass:
|
||||
├── Читаємо accumulated texture
|
||||
├── Normalize: value = R / G (weighted average)
|
||||
└── Apply colormap
|
||||
```
|
||||
|
||||
### Альтернатива: Single-pass з texture atlas
|
||||
|
||||
Замість N проходів, закодувати всі точки в texture і в одному fragment shader пройтись по всіх:
|
||||
|
||||
```glsl
|
||||
// Fragment shader
|
||||
uniform sampler2D u_points; // texture з точками: (lat, lon, rsrp, radius)
|
||||
uniform int u_pointCount;
|
||||
|
||||
void main() {
|
||||
vec2 worldPos = getWorldPosition(v_uv);
|
||||
|
||||
float totalWeight = 0.0;
|
||||
float totalValue = 0.0;
|
||||
|
||||
for (int i = 0; i < MAX_POINTS; i++) {
|
||||
if (i >= u_pointCount) break;
|
||||
|
||||
vec4 point = texelFetch(u_points, ivec2(i, 0), 0);
|
||||
vec2 pointPos = point.xy;
|
||||
float rsrp = point.z;
|
||||
float radius = point.w;
|
||||
|
||||
float dist = distance(worldPos, pointPos);
|
||||
float weight = smoothstep(radius, 0.0, dist);
|
||||
|
||||
totalWeight += weight;
|
||||
totalValue += weight * rsrp;
|
||||
}
|
||||
|
||||
if (totalWeight < 0.001) discard;
|
||||
|
||||
float avgRsrp = totalValue / totalWeight;
|
||||
vec3 color = rsrpToColor(avgRsrp);
|
||||
|
||||
gl_FragColor = vec4(color, smoothstep(0.0, 0.1, totalWeight));
|
||||
}
|
||||
```
|
||||
|
||||
**Проблема:** Loop по 6,675 точках в кожному fragment = дуже повільно.
|
||||
|
||||
### Рекомендований підхід: Batched additive blending
|
||||
|
||||
```
|
||||
1. Створити offscreen framebuffer (float texture)
|
||||
2. Для кожної точки (або batch по 100-500):
|
||||
- Малювати quad розміром з radius точки
|
||||
- Additive blend: (weight * rsrp, weight)
|
||||
3. Final pass: normalize + colormap
|
||||
```
|
||||
|
||||
Це як Mapbox heatmap працює.
|
||||
|
||||
---
|
||||
|
||||
## Імплементація
|
||||
|
||||
### Крок 1: Створити offscreen framebuffer
|
||||
|
||||
```typescript
|
||||
// Accumulation texture (RG float for weighted sum)
|
||||
const accumTexture = gl.createTexture();
|
||||
gl.bindTexture(gl.TEXTURE_2D, accumTexture);
|
||||
gl.texImage2D(gl.TEXTURE_2D, 0, gl.RG32F, width, height, 0, gl.RG, gl.FLOAT, null);
|
||||
|
||||
const framebuffer = gl.createFramebuffer();
|
||||
gl.bindFramebuffer(gl.FRAMEBUFFER, framebuffer);
|
||||
gl.framebufferTexture2D(gl.FRAMEBUFFER, gl.COLOR_ATTACHMENT0, gl.TEXTURE_2D, accumTexture, 0);
|
||||
```
|
||||
|
||||
**Примітка:** Потрібен `EXT_color_buffer_float` extension для float framebuffer.
|
||||
|
||||
### Крок 2: Point rendering shader
|
||||
|
||||
**Vertex shader:**
|
||||
```glsl
|
||||
attribute vec2 a_position; // quad vertices
|
||||
attribute vec2 a_pointCenter; // point lat/lon (instanced)
|
||||
attribute float a_pointRsrp; // point RSRP (instanced)
|
||||
attribute float a_pointRadius; // point radius in pixels (instanced)
|
||||
|
||||
uniform mat4 u_matrix; // world to clip transform
|
||||
|
||||
varying vec2 v_localPos; // position relative to point center
|
||||
varying float v_rsrp;
|
||||
|
||||
void main() {
|
||||
// Expand quad around point center
|
||||
vec2 worldPos = a_pointCenter + a_position * a_pointRadius;
|
||||
gl_Position = u_matrix * vec4(worldPos, 0.0, 1.0);
|
||||
|
||||
v_localPos = a_position; // -1 to 1
|
||||
v_rsrp = a_pointRsrp;
|
||||
}
|
||||
```
|
||||
|
||||
**Fragment shader:**
|
||||
```glsl
|
||||
precision highp float;
|
||||
|
||||
varying vec2 v_localPos;
|
||||
varying float v_rsrp;
|
||||
|
||||
void main() {
|
||||
// Radial distance from center (0 at center, 1 at edge)
|
||||
float dist = length(v_localPos);
|
||||
|
||||
// Discard outside circle
|
||||
if (dist > 1.0) discard;
|
||||
|
||||
// Radial falloff (smooth at edges)
|
||||
float weight = 1.0 - smoothstep(0.0, 1.0, dist);
|
||||
// Or gaussian: weight = exp(-dist * dist * 2.0);
|
||||
|
||||
// Output: (weight * normalized_rsrp, weight)
|
||||
float normalizedRsrp = (v_rsrp + 130.0) / 80.0; // -130 to -50 → 0 to 1
|
||||
gl_FragColor = vec4(weight * normalizedRsrp, weight, 0.0, 1.0);
|
||||
}
|
||||
```
|
||||
|
||||
### Крок 3: Final compositing shader
|
||||
|
||||
```glsl
|
||||
precision highp float;
|
||||
|
||||
uniform sampler2D u_accumTexture;
|
||||
varying vec2 v_uv;
|
||||
|
||||
vec3 rsrpToColor(float t) {
|
||||
// t: 0 = weak (red), 1 = strong (cyan)
|
||||
if (t < 0.25) return mix(vec3(1.0, 0.0, 0.0), vec3(1.0, 0.5, 0.0), t / 0.25);
|
||||
if (t < 0.5) return mix(vec3(1.0, 0.5, 0.0), vec3(1.0, 1.0, 0.0), (t - 0.25) / 0.25);
|
||||
if (t < 0.75) return mix(vec3(1.0, 1.0, 0.0), vec3(0.0, 1.0, 0.0), (t - 0.5) / 0.25);
|
||||
return mix(vec3(0.0, 1.0, 0.0), vec3(0.0, 1.0, 1.0), (t - 0.75) / 0.25);
|
||||
}
|
||||
|
||||
void main() {
|
||||
vec2 accum = texture2D(u_accumTexture, v_uv).rg;
|
||||
|
||||
float totalValue = accum.r;
|
||||
float totalWeight = accum.g;
|
||||
|
||||
// No coverage
|
||||
if (totalWeight < 0.001) discard;
|
||||
|
||||
// Weighted average RSRP
|
||||
float avgRsrp = totalValue / totalWeight;
|
||||
|
||||
// Color mapping
|
||||
vec3 color = rsrpToColor(avgRsrp);
|
||||
|
||||
// Alpha based on weight (fade at edges)
|
||||
float alpha = smoothstep(0.0, 0.1, totalWeight) * 0.85;
|
||||
|
||||
gl_FragColor = vec4(color, alpha);
|
||||
}
|
||||
```
|
||||
|
||||
### Крок 4: Rendering loop
|
||||
|
||||
```typescript
|
||||
function render() {
|
||||
const canvas = canvasRef.current;
|
||||
const gl = glRef.current;
|
||||
|
||||
// 1. Position canvas over map
|
||||
const nw = map.latLngToLayerPoint([bounds.maxLat, bounds.minLon]);
|
||||
const se = map.latLngToLayerPoint([bounds.minLat, bounds.maxLon]);
|
||||
canvas.style.transform = `translate(${nw.x}px, ${nw.y}px)`;
|
||||
canvas.style.width = `${se.x - nw.x}px`;
|
||||
canvas.style.height = `${se.y - nw.y}px`;
|
||||
|
||||
// 2. Clear accumulation buffer
|
||||
gl.bindFramebuffer(gl.FRAMEBUFFER, accumFramebuffer);
|
||||
gl.clearColor(0, 0, 0, 0);
|
||||
gl.clear(gl.COLOR_BUFFER_BIT);
|
||||
|
||||
// 3. Render points with additive blending
|
||||
gl.useProgram(pointProgram);
|
||||
gl.enable(gl.BLEND);
|
||||
gl.blendFunc(gl.ONE, gl.ONE); // Additive
|
||||
|
||||
// Set uniforms (matrix, etc.)
|
||||
const matrix = calculateWorldToClipMatrix(bounds, canvas.width, canvas.height);
|
||||
gl.uniformMatrix4fv(u_matrix, false, matrix);
|
||||
|
||||
// Draw all points (instanced if supported, or batched)
|
||||
drawPoints(gl, points);
|
||||
|
||||
// 4. Final composite pass
|
||||
gl.bindFramebuffer(gl.FRAMEBUFFER, null);
|
||||
gl.useProgram(compositeProgram);
|
||||
gl.blendFunc(gl.SRC_ALPHA, gl.ONE_MINUS_SRC_ALPHA); // Normal blend
|
||||
|
||||
gl.activeTexture(gl.TEXTURE0);
|
||||
gl.bindTexture(gl.TEXTURE_2D, accumTexture);
|
||||
|
||||
drawFullscreenQuad(gl);
|
||||
}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Оптимізації
|
||||
|
||||
### 1. Instanced rendering (якщо підтримується)
|
||||
|
||||
```typescript
|
||||
const ext = gl.getExtension('ANGLE_instanced_arrays');
|
||||
if (ext) {
|
||||
// Use instanced rendering - draw all points in one call
|
||||
ext.drawArraysInstancedANGLE(gl.TRIANGLE_STRIP, 0, 4, points.length);
|
||||
}
|
||||
```
|
||||
|
||||
### 2. Spatial culling
|
||||
|
||||
Малювати тільки точки що потрапляють у viewport:
|
||||
|
||||
```typescript
|
||||
const visiblePoints = points.filter(p => {
|
||||
const screenPos = map.latLngToContainerPoint([p.lat, p.lon]);
|
||||
return screenPos.x > -radius && screenPos.x < canvas.width + radius &&
|
||||
screenPos.y > -radius && screenPos.y < canvas.height + radius;
|
||||
});
|
||||
```
|
||||
|
||||
### 3. Dynamic radius based on zoom
|
||||
|
||||
```typescript
|
||||
const zoom = map.getZoom();
|
||||
const metersPerPixel = 40075016.686 * Math.cos(centerLat * Math.PI / 180) / Math.pow(2, zoom + 8);
|
||||
const radiusPixels = (settings.resolution * 1.5) / metersPerPixel;
|
||||
```
|
||||
|
||||
### 4. Resolution scaling
|
||||
|
||||
На низьких zoom рівнях, рендерити в менший framebuffer і upscale:
|
||||
|
||||
```typescript
|
||||
const scale = zoom < 10 ? 0.5 : zoom < 12 ? 0.75 : 1.0;
|
||||
const fbWidth = Math.round(canvas.width * scale);
|
||||
const fbHeight = Math.round(canvas.height * scale);
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Порівняння з поточним texture-based
|
||||
|
||||
| Аспект | Texture-based | Radial gradients |
|
||||
|--------|---------------|------------------|
|
||||
| Візуалізація | Blocky | Smooth |
|
||||
| Terrain detail | Добре | Менш точно |
|
||||
| Performance | Швидко (1 draw call) | Повільніше (N points) |
|
||||
| Memory | Texture size | Framebuffer + points |
|
||||
| Код складність | Середня | Висока |
|
||||
|
||||
---
|
||||
|
||||
## Чеклист імплементації
|
||||
|
||||
### Phase 1: Basic setup
|
||||
- [ ] Створити новий файл `WebGLRadialCoverageLayer.tsx`
|
||||
- [ ] Setup WebGL context з float extensions
|
||||
- [ ] Створити accumulation framebuffer
|
||||
- [ ] Базовий vertex/fragment shader для точок
|
||||
|
||||
### Phase 2: Point rendering
|
||||
- [ ] Implement point quad rendering
|
||||
- [ ] Radial falloff function
|
||||
- [ ] Additive blending
|
||||
- [ ] Test з кількома точками
|
||||
|
||||
### Phase 3: Compositing
|
||||
- [ ] Final pass shader
|
||||
- [ ] Weighted average calculation
|
||||
- [ ] Color mapping
|
||||
- [ ] Alpha/transparency
|
||||
|
||||
### Phase 4: Integration
|
||||
- [ ] Map positioning (як в поточному WebGL layer)
|
||||
- [ ] Map event listeners (move/zoom)
|
||||
- [ ] Opacity control
|
||||
- [ ] Toggle в UI
|
||||
|
||||
### Phase 5: Optimization
|
||||
- [ ] Instanced rendering
|
||||
- [ ] Spatial culling
|
||||
- [ ] Dynamic radius
|
||||
- [ ] Resolution scaling
|
||||
|
||||
---
|
||||
|
||||
## Fallback
|
||||
|
||||
Якщо WebGL radial не працює (older GPU, missing extensions):
|
||||
- Fallback до Canvas GeographicHeatmap
|
||||
- Або до поточного texture-based WebGL
|
||||
|
||||
---
|
||||
|
||||
## Референси
|
||||
|
||||
1. [Mapbox GL Heatmap implementation](https://github.com/mapbox/mapbox-gl-js/blob/main/src/render/draw_heatmap.js)
|
||||
2. [deck.gl HeatmapLayer](https://deck.gl/docs/api-reference/aggregation-layers/heatmap-layer)
|
||||
3. [WebGL additive blending](https://webglfundamentals.org/webgl/lessons/webgl-text-texture.html)
|
||||
281
docs/devlog/gpu_supp/RFCP-WebGL-Smooth-Coverage-Task.md
Normal file
281
docs/devlog/gpu_supp/RFCP-WebGL-Smooth-Coverage-Task.md
Normal file
@@ -0,0 +1,281 @@
|
||||
# RFCP v3.10.5: WebGL Smooth Coverage Implementation
|
||||
|
||||
## Контекст проблеми
|
||||
|
||||
**Поточний стан:**
|
||||
- Backend повертає grid точок з lat/lon/RSRP (50m = 6,675 pts, 200m = 1,975 pts)
|
||||
- WebGL texture-based rendering: points → texture → GL_LINEAR → colormap
|
||||
- **Проблема:** Видимі grid squares/pixelation, особливо при zoom in або sparse grids (200m)
|
||||
|
||||
**Причина:**
|
||||
- `GL_LINEAR` дає тільки C0 continuity (значення співпадають на краях, але похідні — ні)
|
||||
- Це створює видимі "шви" між клітинками
|
||||
|
||||
## Рішення з ресерчу
|
||||
|
||||
### Ключовий інсайт
|
||||
|
||||
**Catmull-Rom spline interpolation** дає C1 continuity (smooth derivatives) І проходить через exact data values (на відміну від B-spline який blurs peaks).
|
||||
|
||||
**9-tap Catmull-Rom** замість `texture2D()`:
|
||||
- 9 texture fetches замість 1
|
||||
- ~0.32ms vs ~0.30ms на GTX 980 при 1920×1080
|
||||
- Для нашої ~80×85 текстури — практично безкоштовно
|
||||
|
||||
### Критичне правило
|
||||
|
||||
**Інтерполювати RAW RSRP values ПЕРЕД colormap!**
|
||||
- ❌ Неправильно: texture → colormap → interpolate (muddy colors)
|
||||
- ✅ Правильно: texture → interpolate → colormap (clean gradients)
|
||||
|
||||
---
|
||||
|
||||
## Етап 1: Quick Fix (30 хвилин)
|
||||
|
||||
### Smoothstep coordinate remapping
|
||||
|
||||
Найшвидший спосіб прибрати grid edges — одна зміна в shader:
|
||||
|
||||
```glsl
|
||||
// ЗАМІСТЬ:
|
||||
vec4 texColor = texture2D(u_texture, v_uv);
|
||||
|
||||
// ВИКОРИСТАТИ:
|
||||
vec4 textureSmooth(sampler2D tex, vec2 uv, vec2 texSize) {
|
||||
vec2 p = uv * texSize + 0.5;
|
||||
vec2 i = floor(p);
|
||||
vec2 f = p - i;
|
||||
f = f * f * f * (f * (f * 6.0 - 15.0) + 10.0); // quintic hermite
|
||||
return texture2D(tex, (i + f - 0.5) / texSize);
|
||||
}
|
||||
|
||||
// В main():
|
||||
vec4 texColor = textureSmooth(u_texture, v_uv, u_textureSize);
|
||||
```
|
||||
|
||||
**Що це дає:**
|
||||
- C2 continuity з одним texture read
|
||||
- Прибирає видимі grid edges
|
||||
- Мінімальний positional bias
|
||||
|
||||
**Потрібно додати uniform:**
|
||||
```javascript
|
||||
const textureSizeLocation = gl.getUniformLocation(program, 'u_textureSize');
|
||||
gl.uniform2f(textureSizeLocation, textureWidth, textureHeight);
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Етап 2: Production Implementation (1-2 години)
|
||||
|
||||
### 9-tap Catmull-Rom Shader
|
||||
|
||||
```glsl
|
||||
precision highp float;
|
||||
|
||||
uniform sampler2D u_texture;
|
||||
uniform vec2 u_textureSize;
|
||||
uniform float u_opacity;
|
||||
varying vec2 v_uv;
|
||||
|
||||
// Catmull-Rom 9-tap interpolation
|
||||
// Source: TheRealMJP's gist (108 GitHub stars)
|
||||
vec4 SampleTextureCatmullRom(sampler2D tex, vec2 uv, vec2 texSize) {
|
||||
vec2 samplePos = uv * texSize;
|
||||
vec2 texPos1 = floor(samplePos - 0.5) + 0.5;
|
||||
vec2 f = samplePos - texPos1;
|
||||
|
||||
// Catmull-Rom weights
|
||||
vec2 w0 = f * (-0.5 + f * (1.0 - 0.5 * f));
|
||||
vec2 w1 = 1.0 + f * f * (-2.5 + 1.5 * f);
|
||||
vec2 w2 = f * (0.5 + f * (2.0 - 1.5 * f));
|
||||
vec2 w3 = f * f * (-0.5 + 0.5 * f);
|
||||
|
||||
// Combine weights for optimized sampling
|
||||
vec2 w12 = w1 + w2;
|
||||
vec2 offset12 = w2 / (w1 + w2);
|
||||
|
||||
// Compute texture coordinates
|
||||
vec2 texPos0 = (texPos1 - 1.0) / texSize;
|
||||
vec2 texPos3 = (texPos1 + 2.0) / texSize;
|
||||
vec2 texPos12 = (texPos1 + offset12) / texSize;
|
||||
|
||||
// 9 texture fetches (optimized from 16)
|
||||
vec4 result = vec4(0.0);
|
||||
result += texture2D(tex, vec2(texPos0.x, texPos0.y)) * w0.x * w0.y;
|
||||
result += texture2D(tex, vec2(texPos12.x, texPos0.y)) * w12.x * w0.y;
|
||||
result += texture2D(tex, vec2(texPos3.x, texPos0.y)) * w3.x * w0.y;
|
||||
result += texture2D(tex, vec2(texPos0.x, texPos12.y)) * w0.x * w12.y;
|
||||
result += texture2D(tex, vec2(texPos12.x, texPos12.y)) * w12.x * w12.y;
|
||||
result += texture2D(tex, vec2(texPos3.x, texPos12.y)) * w3.x * w12.y;
|
||||
result += texture2D(tex, vec2(texPos0.x, texPos3.y)) * w0.x * w3.y;
|
||||
result += texture2D(tex, vec2(texPos12.x, texPos3.y)) * w12.x * w3.y;
|
||||
result += texture2D(tex, vec2(texPos3.x, texPos3.y)) * w3.x * w3.y;
|
||||
return result;
|
||||
}
|
||||
|
||||
// RSRP to color mapping (cyan -> green -> yellow -> orange -> red)
|
||||
vec3 rsrpToColor(float rsrp) {
|
||||
// rsrp: normalized 0.0 (weak, -110dBm) to 1.0 (strong, -50dBm)
|
||||
|
||||
// Color stops: red -> orange -> yellow -> green -> cyan
|
||||
vec3 c0 = vec3(1.0, 0.0, 0.0); // red (weak)
|
||||
vec3 c1 = vec3(1.0, 0.5, 0.0); // orange
|
||||
vec3 c2 = vec3(1.0, 1.0, 0.0); // yellow
|
||||
vec3 c3 = vec3(0.0, 1.0, 0.0); // green
|
||||
vec3 c4 = vec3(0.0, 1.0, 1.0); // cyan (strong)
|
||||
|
||||
float t = clamp(rsrp, 0.0, 1.0);
|
||||
|
||||
if (t < 0.25) {
|
||||
return mix(c0, c1, t / 0.25);
|
||||
} else if (t < 0.5) {
|
||||
return mix(c1, c2, (t - 0.25) / 0.25);
|
||||
} else if (t < 0.75) {
|
||||
return mix(c2, c3, (t - 0.5) / 0.25);
|
||||
} else {
|
||||
return mix(c3, c4, (t - 0.75) / 0.25);
|
||||
}
|
||||
}
|
||||
|
||||
void main() {
|
||||
// 1. Sample with Catmull-Rom interpolation (RAW value)
|
||||
vec4 texColor = SampleTextureCatmullRom(u_texture, v_uv, u_textureSize);
|
||||
float rsrpNormalized = texColor.r;
|
||||
|
||||
// 2. Discard if no coverage (validity check)
|
||||
if (rsrpNormalized < 0.01) {
|
||||
discard;
|
||||
}
|
||||
|
||||
// 3. Apply colormap AFTER interpolation
|
||||
vec3 color = rsrpToColor(rsrpNormalized);
|
||||
|
||||
// 4. Smooth boundary fading (optional)
|
||||
float boundaryAlpha = smoothstep(0.01, 0.05, rsrpNormalized);
|
||||
|
||||
gl_FragColor = vec4(color, boundaryAlpha * u_opacity);
|
||||
}
|
||||
```
|
||||
|
||||
### JavaScript зміни
|
||||
|
||||
```javascript
|
||||
// 1. Vertex shader (без змін)
|
||||
const vertexShaderSource = `
|
||||
attribute vec2 a_position;
|
||||
attribute vec2 a_texCoord;
|
||||
varying vec2 v_uv;
|
||||
void main() {
|
||||
gl_Position = vec4(a_position, 0.0, 1.0);
|
||||
v_uv = a_texCoord;
|
||||
}
|
||||
`;
|
||||
|
||||
// 2. При створенні texture — зберегти розміри
|
||||
const textureWidth = gridWidth;
|
||||
const textureHeight = gridHeight;
|
||||
|
||||
// 3. Передати uniform
|
||||
const textureSizeLocation = gl.getUniformLocation(program, 'u_textureSize');
|
||||
if (textureSizeLocation) {
|
||||
gl.uniform2f(textureSizeLocation, textureWidth, textureHeight);
|
||||
} else {
|
||||
console.error('[WebGL] u_textureSize uniform NOT FOUND!');
|
||||
}
|
||||
|
||||
// 4. Texture filtering — можна залишити LINEAR для fallback
|
||||
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.LINEAR);
|
||||
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MAG_FILTER, gl.LINEAR);
|
||||
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_S, gl.CLAMP_TO_EDGE);
|
||||
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_T, gl.CLAMP_TO_EDGE);
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Етап 3: Texture Data Format
|
||||
|
||||
### Поточний формат (перевірити)
|
||||
|
||||
```javascript
|
||||
// Normalized RSRP value (0-255 mapped to 0.0-1.0 in shader)
|
||||
const normalized = (rsrp - minRsrp) / (maxRsrp - minRsrp);
|
||||
const value = Math.round(normalized * 255);
|
||||
|
||||
// Store in R channel
|
||||
textureData[idx] = value; // R = normalized RSRP
|
||||
textureData[idx + 1] = value; // G (можна використати для validity mask)
|
||||
textureData[idx + 2] = value; // B
|
||||
textureData[idx + 3] = 255; // A = fully opaque
|
||||
```
|
||||
|
||||
### Альтернатива: Float texture (краща точність)
|
||||
|
||||
```javascript
|
||||
// Якщо браузер підтримує OES_texture_float
|
||||
const ext = gl.getExtension('OES_texture_float');
|
||||
if (ext) {
|
||||
const floatData = new Float32Array(width * height);
|
||||
for (const point of points) {
|
||||
const normalized = (point.rsrp - minRsrp) / (maxRsrp - minRsrp);
|
||||
floatData[gridY * width + gridX] = normalized;
|
||||
}
|
||||
gl.texImage2D(gl.TEXTURE_2D, 0, gl.LUMINANCE, width, height, 0,
|
||||
gl.LUMINANCE, gl.FLOAT, floatData);
|
||||
}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Чеклист імплементації
|
||||
|
||||
### Phase 1: Quick Test (Smoothstep)
|
||||
- [ ] Додати `u_textureSize` uniform
|
||||
- [ ] Замінити `texture2D()` на `textureSmooth()`
|
||||
- [ ] Тест на 50m і 200m
|
||||
- [ ] Тест zoom in/out
|
||||
|
||||
### Phase 2: Production (Catmull-Rom)
|
||||
- [ ] Імплементувати `SampleTextureCatmullRom()`
|
||||
- [ ] Оновити colormap function
|
||||
- [ ] Додати boundary fading
|
||||
- [ ] Тест edge cases (краї текстури)
|
||||
- [ ] Performance benchmark
|
||||
|
||||
### Phase 3: Polish
|
||||
- [ ] Видалити старі CSS blur workarounds
|
||||
- [ ] Видалити cellSize multiplication (не потрібно з Catmull-Rom)
|
||||
- [ ] Cleanup debug logs
|
||||
- [ ] Update version to v3.10.5
|
||||
|
||||
---
|
||||
|
||||
## Очікуваний результат
|
||||
|
||||
**До (GL_LINEAR):**
|
||||
```
|
||||
┌───┬───┬───┐
|
||||
│ A │ B │ C │ ← Видимі краї між клітинками
|
||||
├───┼───┼───┤ C0 continuity
|
||||
│ D │ E │ F │
|
||||
└───┴───┴───┘
|
||||
```
|
||||
|
||||
**Після (Catmull-Rom):**
|
||||
```
|
||||
╭───────────────╮
|
||||
│ ░░░▒▒▓▓██ │ ← Smooth gradient
|
||||
│ ░░░▒▒▓▓██▓▓ │ C1 continuity
|
||||
│ ░░▒▒▓▓██ │ Exact values at grid points
|
||||
╰───────────────╯
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Референси
|
||||
|
||||
1. [TheRealMJP's 9-tap Catmull-Rom HLSL](https://gist.github.com/TheRealMJP/c83b8c0f46b63f3a88a5986f4fa982b1)
|
||||
2. [Inigo Quilez - Better Texture Filtering](https://iquilezles.org/articles/texture/)
|
||||
3. [2D Catmull-Rom in 4 samples - Shadertoy](https://www.shadertoy.com/view/4tyGDD)
|
||||
4. [mapbox-gl-interpolate-heatmap](https://github.com/vinayakkulkarni/mapbox-gl-interpolate-heatmap)
|
||||
5. [NVIDIA GPU Gems 2 - Fast Third-Order Texture Filtering](https://developer.nvidia.com/gpugems/gpugems2/part-iii-high-quality-rendering/chapter-20-fast-third-order-texture-filtering)
|
||||
@@ -0,0 +1,149 @@
|
||||
# RFCP Session Summary — February 4, 2026
|
||||
## GPU Acceleration Complete: 195s → 11.2s (17.4x Speedup)
|
||||
|
||||
---
|
||||
|
||||
## 🎯 Session Goal
|
||||
Complete GPU acceleration pipeline and optimize Full preset performance.
|
||||
|
||||
## 📊 Results
|
||||
|
||||
### Performance Achievement
|
||||
|
||||
| Metric | Before (3.7.0) | After (3.8.0) | Improvement |
|
||||
|--------|----------------|---------------|-------------|
|
||||
| **Full preset** (6640 pts, 50m) | 195s | **11.2s** | **17.4x** |
|
||||
| **Standard preset** (1975 pts, 200m) | 7.2s | **2.3s** (cached) | **3.1x** |
|
||||
| Phase 2.5 (distances+path_loss) | 0.33s | **0.006s** | 55x |
|
||||
| Phase 2.6 (terrain LOS) | 7.29s | **0.04s** | 182x |
|
||||
| Per-point (workers) | 1.1ms | **0.1ms** | 11x |
|
||||
|
||||
### GPU Pipeline (Final Architecture)
|
||||
|
||||
```
|
||||
Phase 1: OSM data fetch (Overpass API) ~6-10s (network)
|
||||
Phase 2: Terrain tile download + cache ~4s first / 0s cached
|
||||
Phase 2.5: GPU — distances + base path_loss 0.006s ⚡
|
||||
Phase 2.6: GPU — terrain LOS + diffraction loss 0.04s ⚡
|
||||
Phase 2.7: GPU — antenna pattern loss ~0s ⚡
|
||||
Phase 3: CPU workers — buildings + vegetation ~2s
|
||||
─────────────────────────────────────────────────
|
||||
TOTAL (cached): ~2.3s (Standard)
|
||||
TOTAL (cached): ~11.2s (Full)
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 🔧 Changes Made (Iterations 3.7.0 → 3.8.0)
|
||||
|
||||
### Iteration 3.7.0 — GPU Precompute Foundation
|
||||
- Added `gpu_manager` import to `coverage_service.py`
|
||||
- Grid arrays created on GPU (CuPy)
|
||||
- GPU precompute for distances + path_loss (vectorized)
|
||||
- Fixed critical bug: CuPy worker process crashes (CUDA context sharing)
|
||||
- Solution: GPU only in main process, workers use precomputed CPU values
|
||||
- Fixed frontend duplicate calculation guard
|
||||
|
||||
### Iteration 3.8.0 — Full Vectorization
|
||||
- **Phase 2.6**: `batch_terrain_los()` in `gpu_service.py`
|
||||
- Vectorized terrain profile sampling for ALL points simultaneously
|
||||
- Earth curvature correction vectorized
|
||||
- Fresnel clearance + diffraction loss vectorized
|
||||
- **Phase 2.7**: `batch_antenna_pattern()` in `gpu_service.py`
|
||||
- Workers receive precomputed `has_los`, `terrain_loss`, `antenna_loss`
|
||||
- Workers only compute buildings + reflections + vegetation
|
||||
|
||||
### Critical Fix: `_batch_elevation_lookup` Vectorization
|
||||
- **Before**: Python `for` loop over 59,250 coordinates (7.29s)
|
||||
- **After**: Vectorized NumPy tile indexing, loop only over tiles (0.04s)
|
||||
- **Impact**: 182x speedup on Phase 2.6 alone
|
||||
|
||||
### Critical Fix: Vegetation Bbox Pre-filter
|
||||
- **Before**: Each sample point checked ALL 683 vegetation polygons
|
||||
- **After**: Bounding box pre-filter skips 95%+ of polygons
|
||||
- **Impact**: Full preset 156s → 11.2s
|
||||
|
||||
---
|
||||
|
||||
## 📁 Files Modified
|
||||
|
||||
### Backend
|
||||
- `app/services/coverage_service.py` — precomputed values passthrough
|
||||
- `app/services/parallel_coverage_service.py` — 5 worker functions updated
|
||||
- `app/services/gpu_service.py` — batch_terrain_los, batch_antenna_pattern, batch_final_rsrp
|
||||
- `app/services/vegetation_service.py` — bbox pre-filter on _point_in_vegetation
|
||||
|
||||
### Build
|
||||
- PyInstaller ONEDIR build: 1.6 GB dist → 1.2 GB NSIS installer
|
||||
- CUDA DLLs bundled (cublas, cusparse, curand, etc.)
|
||||
- Runtime hook for DLL directory setup
|
||||
|
||||
---
|
||||
|
||||
## 🏗️ Architecture (Final State)
|
||||
|
||||
```
|
||||
Main Process (asyncio event loop)
|
||||
├── Phase 2.5: GPU precompute
|
||||
│ └── CuPy arrays: distances, path_loss (vectorized)
|
||||
├── Phase 2.6: GPU terrain LOS
|
||||
│ └── Batch elevation lookup (vectorized NumPy)
|
||||
│ └── Earth curvature + Fresnel (CuPy)
|
||||
│ └── Diffraction loss (CuPy)
|
||||
├── Phase 2.7: GPU antenna pattern
|
||||
│ └── Bearing + pattern loss (CuPy)
|
||||
│
|
||||
└── Phase 3: CPU ProcessPool (3 workers)
|
||||
└── Receive precomputed dict per point
|
||||
└── Skip terrain/antenna (already computed)
|
||||
└── Only: buildings + reflections + vegetation
|
||||
└── Pure NumPy + CPU
|
||||
```
|
||||
|
||||
**Key Rule**: GPU (CuPy) code ONLY in main process. Workers never import gpu_manager.
|
||||
|
||||
---
|
||||
|
||||
## 🎮 Side Activity: Dwarf Fortress Gamelog Analysis
|
||||
|
||||
Analyzed 102,669-line gamelog from fort "Lashderush (Prophethandle)":
|
||||
- 8-9 years, 23 migrant waves, 1,943 masterpieces
|
||||
- 51,599 combat actions, only 4 deaths (weredeer outbreak)
|
||||
- Top crafter: Momuz Nëkorlibash (201 masterpieces)
|
||||
- Sole survivor transforms between dwarf/weredeer
|
||||
|
||||
---
|
||||
|
||||
## 🔮 Next Steps
|
||||
|
||||
### Immediate
|
||||
- [x] ~~GPU acceleration~~ ✅ COMPLETE
|
||||
- [ ] SRTM terrain data integration (higher accuracy than current tiles)
|
||||
- [ ] Session history persistence across app restarts
|
||||
|
||||
### Short Term
|
||||
- [ ] Multi-station dashboard
|
||||
- [ ] Project export/import (JSON)
|
||||
- [ ] Link budget analysis view
|
||||
|
||||
### Medium Term
|
||||
- [ ] LimeSDR hardware integration testing
|
||||
- [ ] Real RF validation against field measurements
|
||||
- [ ] 3D visualization mode
|
||||
|
||||
---
|
||||
|
||||
## 💡 Key Learnings
|
||||
|
||||
1. **Python for-loops are the enemy** — `_batch_elevation_lookup` went from 7.3s to 0.04s by replacing enumerate(zip()) with NumPy indexing
|
||||
2. **Spatial pre-filtering is massive** — vegetation bbox check eliminated 95%+ of polygon tests
|
||||
3. **GPU context can't be shared across processes** — spawn mode creates new CUDA contexts that OOM
|
||||
4. **Vectorize in main, distribute to workers** — best pattern for GPU + multiprocessing
|
||||
5. **Profile before optimizing** — Phase 2.6 bottleneck was invisible until measured
|
||||
|
||||
---
|
||||
|
||||
*Session duration: ~4 hours*
|
||||
*Lines of code changed: ~300*
|
||||
*Performance gain: 17.4x*
|
||||
*Feeling: 🚀*
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user