10 Commits

Author SHA1 Message Date
833dead43c @mytec: stack done, rust next 2026-02-07 12:56:25 +02:00
1d8375af02 @mytec: 10km grad works 2026-02-07 01:14:01 +02:00
acfd9b8f7b @mytec: WebGL works 2026-02-06 22:17:24 +02:00
81e078e92a @mytec: iter3.10 start, baseline rc ready 2026-02-04 15:56:09 +02:00
e392b449cc @mytec: 3.8.0a done 2026-02-04 00:50:52 +02:00
6dcc5a19b9 @mytec: 3.8.0 start, stable w/0 ref+ 2026-02-03 23:24:12 +02:00
6cd9d869cc @mytec: iter3.7.0 start, gpu calc int 2026-02-03 22:41:08 +02:00
a61753c642 @mytec: iter3.2.5 gpu polish start 2026-02-03 12:33:52 +02:00
20d19d09ae @mytec: iter3.5.1 ready for testing 2026-02-03 12:04:36 +02:00
255b91f257 @mytec: iter3.5.1 start 2026-02-03 10:51:26 +02:00
85 changed files with 14101 additions and 252 deletions

View File

@@ -43,7 +43,13 @@
"Bash(kill:*)", "Bash(kill:*)",
"Bash(sort:*)", "Bash(sort:*)",
"Bash(journalctl:*)", "Bash(journalctl:*)",
"Bash(pkill:*)" "Bash(pkill:*)",
"Bash(pip3 list:*)",
"Bash(chmod:*)",
"Bash(pyinstaller:*)",
"Bash(npm i:*)",
"Bash(npm uninstall:*)",
"Bash(npm rebuild:*)"
] ]
} }
} }

4
.gitignore vendored
View File

@@ -24,3 +24,7 @@ installer/dist/
__pycache__/ __pycache__/
*.pyc *.pyc
nul nul
# PyInstaller build artifacts
backend/build/
backend/dist/

1513
RFCP-RUST-MIGRATION-PLAN.md Normal file

File diff suppressed because it is too large Load Diff

23
RFCP.bat Normal file
View File

@@ -0,0 +1,23 @@
@echo off
REM RFCP launcher: sanity-checks the installation, then runs the FastAPI
REM backend via uvicorn on port 8090 in the foreground (Ctrl+C stops it).
title RFCP - RF Coverage Planner
REM %~dp0 = directory of this script, so it works from any caller cwd.
cd /d "%~dp0"
REM Check if backend exists
if not exist "backend\app\main.py" (
    echo ERROR: RFCP backend not found.
    echo Run install.bat first or check your installation.
    pause
    exit /b 1
)
echo ============================================
echo RFCP - RF Coverage Planner
echo ============================================
echo.
echo Starting backend server...
echo Open http://localhost:8090 in your browser
echo Press Ctrl+C to stop
echo.
REM Assumes "python" resolves to an interpreter with uvicorn installed.
cd backend
python -m uvicorn app.main:app --host 0.0.0.0 --port 8090

View File

@@ -14,6 +14,7 @@ from app.services.coverage_service import (
select_propagation_model, select_propagation_model,
) )
from app.services.parallel_coverage_service import CancellationToken from app.services.parallel_coverage_service import CancellationToken
from app.services.boundary_service import calculate_coverage_boundary
router = APIRouter() router = APIRouter()
@@ -24,6 +25,12 @@ class CoverageRequest(BaseModel):
settings: CoverageSettings = CoverageSettings() settings: CoverageSettings = CoverageSettings()
class BoundaryPoint(BaseModel):
"""Single boundary coordinate"""
lat: float
lon: float
class CoverageResponse(BaseModel): class CoverageResponse(BaseModel):
"""Coverage calculation response""" """Coverage calculation response"""
points: List[CoveragePoint] points: List[CoveragePoint]
@@ -32,6 +39,7 @@ class CoverageResponse(BaseModel):
stats: dict stats: dict
computation_time: float # seconds computation_time: float # seconds
models_used: List[str] # which models were active models_used: List[str] # which models were active
boundary: Optional[List[BoundaryPoint]] = None # coverage boundary polygon
@router.post("/calculate") @router.post("/calculate")
@@ -131,13 +139,24 @@ async def calculate_coverage(request: CoverageRequest) -> CoverageResponse:
"points_with_atmospheric_loss": sum(1 for p in points if p.atmospheric_loss > 0), "points_with_atmospheric_loss": sum(1 for p in points if p.atmospheric_loss > 0),
} }
# Calculate coverage boundary
boundary = None
if points:
boundary_coords = calculate_coverage_boundary(
[p.model_dump() for p in points],
threshold_dbm=request.settings.min_signal,
)
if boundary_coords:
boundary = [BoundaryPoint(**c) for c in boundary_coords]
return CoverageResponse( return CoverageResponse(
points=points, points=points,
count=len(points), count=len(points),
settings=effective_settings, settings=effective_settings,
stats=stats, stats=stats,
computation_time=round(computation_time, 2), computation_time=round(computation_time, 2),
models_used=models_used models_used=models_used,
boundary=boundary,
) )
@@ -249,6 +268,358 @@ async def get_buildings(
} }
@router.post("/link-budget")
async def calculate_link_budget(request: dict):
    """Calculate point-to-point link budget.

    Body: {
        "tx_lat": 48.46, "tx_lon": 35.04,
        "tx_power_dbm": 43, "tx_gain_dbi": 18, "tx_cable_loss_db": 2,
        "tx_height_m": 30,
        "rx_lat": 48.50, "rx_lon": 35.10,
        "rx_gain_dbi": 0, "rx_cable_loss_db": 0, "rx_sensitivity_dbm": -100,
        "rx_height_m": 1.5,
        "frequency_mhz": 1800
    }

    Returns distance, endpoint elevations, EIRP, FSPL, a rough terrain
    (knife-edge) loss estimate, received power, link margin, and an
    "OK"/"FAIL" status (OK when margin_db >= 0).
    """
    import math
    from app.services.terrain_service import terrain_service

    # Extract parameters with defaults
    tx_lat = request.get("tx_lat", 48.46)
    tx_lon = request.get("tx_lon", 35.04)
    tx_power_dbm = request.get("tx_power_dbm", 43)
    tx_gain_dbi = request.get("tx_gain_dbi", 18)
    tx_cable_loss_db = request.get("tx_cable_loss_db", 2)
    tx_height_m = request.get("tx_height_m", 30)
    rx_lat = request.get("rx_lat", 48.50)
    rx_lon = request.get("rx_lon", 35.10)
    rx_gain_dbi = request.get("rx_gain_dbi", 0)
    rx_cable_loss_db = request.get("rx_cable_loss_db", 0)
    rx_sensitivity_dbm = request.get("rx_sensitivity_dbm", -100)
    rx_height_m = request.get("rx_height_m", 1.5)
    freq = request.get("frequency_mhz", 1800)

    # Calculate distance (haversine_distance presumably returns meters —
    # implied by the /1000 conversion below; confirm against terrain_service)
    distance_m = terrain_service.haversine_distance(tx_lat, tx_lon, rx_lat, rx_lon)
    distance_km = distance_m / 1000

    # Get elevations (meters ASL) at both endpoints
    tx_elev = await terrain_service.get_elevation(tx_lat, tx_lon)
    rx_elev = await terrain_service.get_elevation(rx_lat, rx_lon)

    # EIRP
    eirp_dbm = tx_power_dbm + tx_gain_dbi - tx_cable_loss_db

    # Free space path loss: standard FSPL(dB) for d in km, f in MHz
    # (the 32.45 constant fixes those units)
    if distance_km > 0:
        fspl_db = 20 * math.log10(distance_km) + 20 * math.log10(freq) + 32.45
    else:
        fspl_db = 0

    # Terrain profile for LOS check (fixed 100 samples along the path)
    profile = await terrain_service.get_elevation_profile(
        tx_lat, tx_lon, rx_lat, rx_lon, num_points=100
    )

    # LOS check - does terrain block line of sight?
    # NOTE(review): straight-line interpolation between antenna tops, no
    # earth-curvature / k-factor correction — confirm acceptable for long paths.
    tx_total_height = tx_elev + tx_height_m
    rx_total_height = rx_elev + rx_height_m
    terrain_loss_db = 0.0
    los_clear = True
    obstructions = []
    for i, point in enumerate(profile):
        # Endpoints host the antennas themselves; never treated as obstructions.
        if i == 0 or i == len(profile) - 1:
            continue
        # Linear interpolation of LOS line at this point
        fraction = i / (len(profile) - 1)
        los_height = tx_total_height + fraction * (rx_total_height - tx_total_height)
        if point["elevation"] > los_height:
            los_clear = False
            obstruction_height = point["elevation"] - los_height
            obstructions.append({
                "distance_m": point["distance"],
                "height_above_los_m": round(obstruction_height, 1),
            })
            # Knife-edge diffraction estimate: ~6dB per major obstruction
            terrain_loss_db += min(6.0, obstruction_height * 0.3)

    # Cap terrain loss at reasonable max
    terrain_loss_db = min(terrain_loss_db, 40.0)
    total_path_loss = fspl_db + terrain_loss_db

    # Received power
    rx_power_dbm = eirp_dbm - total_path_loss + rx_gain_dbi - rx_cable_loss_db

    # Link margin
    margin_db = rx_power_dbm - rx_sensitivity_dbm

    return {
        "distance_km": round(distance_km, 2),
        "distance_m": round(distance_m, 1),
        "tx_elevation_m": round(tx_elev, 1),
        "rx_elevation_m": round(rx_elev, 1),
        "eirp_dbm": round(eirp_dbm, 1),
        "fspl_db": round(fspl_db, 1),
        "terrain_loss_db": round(terrain_loss_db, 1),
        "total_path_loss_db": round(total_path_loss, 1),
        "los_clear": los_clear,
        "obstructions": obstructions,
        "rx_power_dbm": round(rx_power_dbm, 1),
        "margin_db": round(margin_db, 1),
        "status": "OK" if margin_db >= 0 else "FAIL",
        # Echo the input budget terms so the client can display them verbatim
        "link_budget": {
            "tx_power_dbm": tx_power_dbm,
            "tx_gain_dbi": tx_gain_dbi,
            "tx_cable_loss_db": tx_cable_loss_db,
            "rx_gain_dbi": rx_gain_dbi,
            "rx_cable_loss_db": rx_cable_loss_db,
            "rx_sensitivity_dbm": rx_sensitivity_dbm,
        },
    }
@router.post("/fresnel-profile")
async def fresnel_profile(request: dict):
    """Calculate terrain profile with Fresnel zone boundaries.

    Body: {
        "tx_lat": 48.46, "tx_lon": 35.04, "tx_height_m": 30,
        "rx_lat": 48.50, "rx_lon": 35.10, "rx_height_m": 1.5,
        "frequency_mhz": 1800,
        "num_points": 100
    }

    Returns per-sample terrain elevation, the LOS line, the first Fresnel
    zone envelope, plus summary flags (los_clear / fresnel_clear), the
    worst clearance, and a heuristic extra-loss estimate.
    """
    import math
    from app.services.terrain_service import terrain_service

    tx_lat = request.get("tx_lat", 48.46)
    tx_lon = request.get("tx_lon", 35.04)
    rx_lat = request.get("rx_lat", 48.50)
    rx_lon = request.get("rx_lon", 35.10)
    tx_height = request.get("tx_height_m", 30)
    rx_height = request.get("rx_height_m", 1.5)
    freq = request.get("frequency_mhz", 1800)
    num_points = request.get("num_points", 100)

    # Get terrain profile
    profile = await terrain_service.get_elevation_profile(
        tx_lat, tx_lon, rx_lat, rx_lon, num_points
    )
    if not profile:
        return {"error": "Could not generate terrain profile"}

    total_distance = profile[-1]["distance"] if profile else 0

    # Get endpoint elevations; antenna tops define the straight LOS line
    tx_elev = profile[0]["elevation"]
    rx_elev = profile[-1]["elevation"]
    tx_total = tx_elev + tx_height
    rx_total = rx_elev + rx_height

    wavelength = 300.0 / freq  # meters (c ≈ 3e8 m/s, freq in MHz)

    # Calculate Fresnel zone at each profile point
    fresnel_data = []
    los_blocked = False
    fresnel_blocked = False
    # worst_clearance tracks the minimum gap between terrain and the bottom
    # of the first Fresnel zone (negative = intrusion); profile is non-empty
    # here, so the loop always replaces the initial +inf.
    worst_clearance = float('inf')
    fresnel_intrusion_count = 0
    for i, point in enumerate(profile):
        d1 = point["distance"]  # distance from tx
        d2 = total_distance - d1  # distance to rx
        # LOS height at this point (linear interpolation)
        if total_distance > 0:
            fraction = d1 / total_distance
        else:
            fraction = 0
        los_height = tx_total + fraction * (rx_total - tx_total)
        # First Fresnel zone radius: r_n = sqrt(n*λ*d1*d2/d), n = 1
        if d1 > 0 and d2 > 0 and total_distance > 0:
            f1_radius = math.sqrt((1 * wavelength * d1 * d2) / total_distance)
        else:
            f1_radius = 0
        # Fresnel zone boundaries (height above sea level)
        fresnel_top = los_height + f1_radius
        fresnel_bottom = los_height - f1_radius
        # Clearance: how much space between terrain and Fresnel bottom
        clearance = fresnel_bottom - point["elevation"]
        if clearance < worst_clearance:
            worst_clearance = clearance
        if point["elevation"] > los_height:
            los_blocked = True
        if point["elevation"] > fresnel_bottom:
            fresnel_blocked = True
            fresnel_intrusion_count += 1
        fresnel_data.append({
            "distance": round(point["distance"], 1),
            "lat": point["lat"],
            "lon": point["lon"],
            "terrain_elevation": round(point["elevation"], 1),
            "los_height": round(los_height, 1),
            "fresnel_top": round(fresnel_top, 1),
            "fresnel_bottom": round(fresnel_bottom, 1),
            "f1_radius": round(f1_radius, 1),
            "clearance": round(clearance, 1),
        })

    # Calculate Fresnel clearance percentage (share of samples with no intrusion)
    fresnel_clear_pct = round(100 * (1 - fresnel_intrusion_count / len(profile)), 1) if profile else 100

    # Estimate additional loss due to Fresnel obstruction (heuristics, not a
    # diffraction model — LOS blockage dominates when present)
    if los_blocked:
        estimated_loss_db = 10 + abs(worst_clearance) * 0.5  # rough estimate
    elif fresnel_blocked:
        estimated_loss_db = 3 + (100 - fresnel_clear_pct) * 0.06  # 3-6 dB typical
    else:
        estimated_loss_db = 0

    return {
        "profile": fresnel_data,
        "total_distance_m": round(total_distance, 1),
        "tx_elevation": round(tx_elev, 1),
        "rx_elevation": round(rx_elev, 1),
        "frequency_mhz": freq,
        "wavelength_m": round(wavelength, 4),
        "los_clear": not los_blocked,
        "fresnel_clear": not fresnel_blocked,
        "fresnel_clear_pct": fresnel_clear_pct,
        "worst_clearance_m": round(worst_clearance, 1),
        "estimated_loss_db": round(estimated_loss_db, 1),
        "recommendation": (
            "Clear — excellent link" if not fresnel_blocked
            else "Fresnel zone partially blocked — expect 3-6 dB additional loss"
            if not los_blocked
            else "LOS blocked — significant diffraction loss expected"
        ),
    }
@router.post("/interference")
async def calculate_interference(request: CoverageRequest):
    """Calculate C/I (carrier-to-interference) ratio for multi-site scenario.

    Uses the same request format as /calculate but returns interference analysis
    instead of raw coverage. Requires 2+ sites to be meaningful.

    Returns for each grid point:
    - C/I ratio (carrier to interference) in dB
    - Best server index
    - Best server RSRP
    """
    import numpy as np
    from app.services.gpu_service import gpu_service

    # C/I needs at least two signals; cap the site count since every site
    # costs a full coverage run below.
    if len(request.sites) < 2:
        raise HTTPException(400, "At least 2 sites required for interference analysis")
    if len(request.sites) > 10:
        raise HTTPException(400, "Maximum 10 sites per request")

    # First calculate coverage for all sites
    start_time = time.time()
    cancel_token = CancellationToken()
    try:
        # Calculate coverage for each site individually (sequentially;
        # the shared cancel_token lets a timeout abort in-flight work)
        site_results = []
        for site in request.sites:
            points = await asyncio.wait_for(
                coverage_service.calculate_coverage(
                    site,
                    request.settings,
                    cancel_token,
                ),
                timeout=120.0,  # 2 min per site
            )
            site_results.append(points)
    except asyncio.TimeoutError:
        cancel_token.cancel()
        raise HTTPException(408, "Calculation timeout")
    computation_time = time.time() - start_time

    # Build coordinate -> RSRP mapping for each site
    # We need to align the grids (same points for all sites); rounding to
    # 6 decimal places (~0.1 m) makes coordinates from separate runs match.
    coord_set = set()
    for points in site_results:
        for p in points:
            coord_set.add((round(p.lat, 6), round(p.lon, 6)))
    coord_list = sorted(coord_set)

    # Build RSRP arrays aligned to coord_list
    rsrp_grids = []
    frequencies = []
    for idx, (site, points) in enumerate(zip(request.sites, site_results)):
        # Map coordinates to RSRP
        point_map = {(round(p.lat, 6), round(p.lon, 6)): p.rsrp for p in points}
        rsrp_array = np.array([
            point_map.get(coord, -150)  # -150 dBm = no coverage
            for coord in coord_list
        ], dtype=np.float64)
        rsrp_grids.append(rsrp_array)
        frequencies.append(site.frequency)

    # Calculate C/I using GPU service
    ci_ratio, best_server_idx, best_rsrp = gpu_service.calculate_interference_vectorized(
        rsrp_grids, frequencies
    )

    # Build result points with C/I data
    ci_points = []
    for i, (lat, lon) in enumerate(coord_list):
        ci_points.append({
            "lat": lat,
            "lon": lon,
            "ci_ratio_db": round(float(ci_ratio[i]), 1),
            "best_server_idx": int(best_server_idx[i]),
            "best_server_rsrp": round(float(best_rsrp[i]), 1),
        })

    # Calculate statistics: >=10 dB good, 0..10 dB marginal,
    # <0 dB interference-dominant
    ci_values = [p["ci_ratio_db"] for p in ci_points]
    stats = {
        "min_ci_db": round(min(ci_values), 1) if ci_values else 0,
        "max_ci_db": round(max(ci_values), 1) if ci_values else 0,
        "avg_ci_db": round(sum(ci_values) / len(ci_values), 1) if ci_values else 0,
        "good_coverage_pct": round(100 * sum(1 for c in ci_values if c >= 10) / len(ci_values), 1) if ci_values else 0,
        "marginal_coverage_pct": round(100 * sum(1 for c in ci_values if 0 <= c < 10) / len(ci_values), 1) if ci_values else 0,
        "interference_dominant_pct": round(100 * sum(1 for c in ci_values if c < 0) / len(ci_values), 1) if ci_values else 0,
    }

    # Check for frequency groups (sites sharing a frequency are co-channel)
    unique_freqs = set(frequencies)
    freq_groups = {}
    for freq in unique_freqs:
        freq_groups[freq] = sum(1 for f in frequencies if f == freq)

    return {
        "points": ci_points,
        "count": len(ci_points),
        "stats": stats,
        "computation_time": round(computation_time, 2),
        "sites": [{"name": s.name, "frequency_mhz": s.frequency} for s in request.sites],
        "frequency_groups": freq_groups,
        # Warn when no frequency is reused (no co-channel interference exists)
        "warning": None if any(c > 1 for c in freq_groups.values()) else "All sites on different frequencies - no co-channel interference",
    }
def _get_active_models(settings: CoverageSettings) -> List[str]: def _get_active_models(settings: CoverageSettings) -> List[str]:
"""Determine which propagation models are active""" """Determine which propagation models are active"""
models = [] # Base propagation model added by caller via select_propagation_model() models = [] # Base propagation model added by caller via select_propagation_model()

View File

@@ -33,3 +33,9 @@ async def gpu_set_device(request: SetDeviceRequest):
return {"status": "ok", **result} return {"status": "ok", **result}
except ValueError as e: except ValueError as e:
raise HTTPException(status_code=400, detail=str(e)) raise HTTPException(status_code=400, detail=str(e))
@router.get("/diagnostics")
async def gpu_diagnostics():
    """Expose the GPU manager's full diagnostic report.

    Intended for troubleshooting GPU detection issues; simply relays
    whatever ``gpu_manager.get_diagnostics()`` reports.
    """
    report = gpu_manager.get_diagnostics()
    return report

View File

@@ -1,12 +1,29 @@
import sys
import platform
from fastapi import APIRouter, Depends from fastapi import APIRouter, Depends
from app.api.deps import get_db from app.api.deps import get_db
from app.services.gpu_backend import gpu_manager
router = APIRouter() router = APIRouter()
@router.get("/") @router.get("/")
async def health_check(): async def health_check():
return {"status": "ok", "service": "rfcp-backend", "version": "1.1.0"} gpu_info = gpu_manager.get_status()
return {
"status": "ok",
"service": "rfcp-backend",
"version": "3.6.0",
"build": "gpu" if gpu_info.get("gpu_available") else "cpu",
"gpu": {
"available": gpu_info.get("gpu_available", False),
"backend": gpu_info.get("active_backend", "cpu"),
"device": gpu_info.get("active_device", {}).get("name") if gpu_info.get("active_device") else "CPU",
},
"python": sys.version.split()[0],
"platform": platform.system(),
}
@router.get("/db") @router.get("/db")

View File

@@ -180,3 +180,93 @@ async def get_terrain_file(region: str):
if os.path.exists(terrain_path): if os.path.exists(terrain_path):
return FileResponse(terrain_path) return FileResponse(terrain_path)
raise HTTPException(status_code=404, detail=f"Region '{region}' not found") raise HTTPException(status_code=404, detail=f"Region '{region}' not found")
@router.get("/status")
async def terrain_status():
    """Return terrain data availability info."""
    tiles = terrain_service.get_cached_tiles()
    size_mb = terrain_service.get_cache_size_mb()

    # Bucket tiles by resolution using on-disk size: an SRTM1 (30 m) tile
    # is exactly 3601x3601 int16 samples; everything else counts as SRTM3.
    srtm1_bytes = 3601 * 3601 * 2
    one_arc: list = []
    three_arc: list = []
    for name in tiles:
        hgt = terrain_service.terrain_path / f"{name}.hgt"
        try:
            target = one_arc if hgt.stat().st_size == srtm1_bytes else three_arc
            target.append(name)
        except Exception:
            # stat() failed (file vanished/unreadable) — skip silently,
            # matching the original best-effort behavior.
            pass

    return {
        "total_tiles": len(tiles),
        "srtm1": {
            "count": len(one_arc),
            "resolution_m": 30,
            "tiles": sorted(one_arc),
        },
        "srtm3": {
            "count": len(three_arc),
            "resolution_m": 90,
            "tiles": sorted(three_arc),
        },
        "cache_size_mb": round(size_mb, 1),
        "memory_cached": len(terrain_service._tile_cache),
        "terra_server": "https://terra.eliah.one",
    }
@router.post("/download")
async def terrain_download(request: dict):
    """Pre-download tiles for a region.

    Body: {"center_lat": 48.46, "center_lon": 35.04, "radius_km": 50}
    Or: {"tiles": ["N48E034", "N48E035", "N47E034", "N47E035"]}
    """
    # An explicit tile list takes precedence over center/radius lookup.
    if "tiles" in request:
        wanted = request["tiles"]
    else:
        wanted = terrain_service.get_required_tiles(
            request.get("center_lat", 48.46),
            request.get("center_lon", 35.04),
            request.get("radius_km", 50),
        )

    missing = [name for name in wanted if not terrain_service.get_tile_path(name).exists()]
    if not missing:
        return {"status": "ok", "message": "All tiles already cached", "count": len(wanted)}

    # Fetch each missing tile sequentially, partitioning by outcome.
    downloaded: list = []
    failed: list = []
    for name in missing:
        bucket = downloaded if await terrain_service.download_tile(name) else failed
        bucket.append(name)

    return {
        "status": "ok",
        "required": len(wanted),
        "already_cached": len(wanted) - len(missing),
        "downloaded": downloaded,
        "failed": failed,
    }
@router.get("/index")
async def terrain_index():
    """Fetch tile index from terra server."""
    import httpx

    # Best-effort fetch: any network/HTTP/JSON failure (or a non-200
    # status) falls through to the offline payload below.
    try:
        async with httpx.AsyncClient(timeout=10.0) as client:
            response = await client.get("https://terra.eliah.one/api/index")
            if response.status_code == 200:
                return response.json()
    except Exception:
        pass
    return {"error": "Could not reach terra.eliah.one", "offline": True}

View File

@@ -1,4 +1,6 @@
from contextlib import asynccontextmanager from contextlib import asynccontextmanager
import logging
import platform
from fastapi import FastAPI, WebSocket from fastapi import FastAPI, WebSocket
from fastapi.middleware.cors import CORSMiddleware from fastapi.middleware.cors import CORSMiddleware
@@ -7,9 +9,54 @@ from app.core.database import connect_to_mongo, close_mongo_connection
from app.api.routes import health, projects, terrain, coverage, regions, system, gpu from app.api.routes import health, projects, terrain, coverage, regions, system, gpu
from app.api.websocket import websocket_endpoint from app.api.websocket import websocket_endpoint
logger = logging.getLogger("rfcp.startup")


def check_gpu_availability():
    """Log GPU status on startup for debugging.

    Best-effort probe: checks CuPy/CUDA first, then PyOpenCL. Never raises;
    every failure is logged and startup proceeds on the CPU path.

    Fix: the original had two ``except Exception`` clauses on each try
    block, so the second clause was unreachable dead code. The first
    handler is meant for a missing package and now catches ImportError;
    the second catches runtime errors from an installed package.
    """
    is_wsl = "microsoft" in platform.release().lower()
    env_note = " (WSL2)" if is_wsl else ""

    # Check CuPy / CUDA
    try:
        import cupy as cp
        device_count = cp.cuda.runtime.getDeviceCount()
        if device_count > 0:
            props = cp.cuda.runtime.getDeviceProperties(0)
            name = props["name"]
            if isinstance(name, bytes):
                name = name.decode()
            mem_mb = props["totalGlobalMem"] // (1024 * 1024)
            logger.info(f"GPU detected{env_note}: {name} ({mem_mb} MB VRAM)")
            logger.info(f"CuPy {cp.__version__}, CUDA devices: {device_count}")
        else:
            logger.warning(f"CuPy installed but no CUDA devices found{env_note}")
    except ImportError as e:
        # CuPy not installed: suggest the environment-appropriate install.
        logger.warning(f"CuPy FAILED {env_note}: {e}")
        if is_wsl:
            logger.warning("Install: pip3 install cupy-cuda12x --break-system-packages")
        else:
            logger.warning("Install: pip install cupy-cuda12x")
    except Exception as e:
        # CuPy present but the CUDA runtime failed (driver mismatch, etc.).
        logger.warning(f"CuPy error{env_note}: {e}")

    # Check PyOpenCL
    try:
        import pyopencl as cl
        platforms = cl.get_platforms()
        for p in platforms:
            for d in p.get_devices():
                logger.info(f"OpenCL device: {d.name.strip()}")
    except ImportError:
        logger.debug("PyOpenCL not installed (optional)")
    except Exception:
        # Installed but enumeration failed — purely informational, ignore.
        pass
@asynccontextmanager @asynccontextmanager
async def lifespan(app: FastAPI): async def lifespan(app: FastAPI):
# Log GPU status on startup
check_gpu_availability()
await connect_to_mongo() await connect_to_mongo()
yield yield
await close_mongo_connection() await close_mongo_connection()

View File

@@ -0,0 +1,122 @@
"""
Coverage boundary calculation service.
Computes concave hull (alpha shape) from coverage points to generate
a realistic boundary that follows actual coverage contour.
"""
import logging
from typing import Optional
logger = logging.getLogger(__name__)


def calculate_coverage_boundary(
    points: list[dict],
    threshold_dbm: float = -100,
    simplify_tolerance: float = 0.001,
) -> list[dict]:
    """Compute the coverage outline as a concave hull (alpha shape).

    Points whose 'rsrp' is below ``threshold_dbm`` are discarded, the rest
    are hulled with Shapely's concave_hull (ratio 0.3) and simplified.

    Args:
        points: Coverage points carrying 'lat', 'lon' and 'rsrp' keys.
        threshold_dbm: RSRP cutoff; weaker points are excluded.
        simplify_tolerance: Simplification tolerance in degrees
            (0.001 ≈ 100 m); 0 disables simplification.

    Returns:
        Boundary polygon as a list of {'lat', 'lon'} dicts, or an empty
        list when Shapely is missing or no boundary can be derived.
    """
    try:
        from shapely.geometry import MultiPoint
        from shapely import concave_hull
    except ImportError:
        logger.warning("Shapely not installed - boundary calculation disabled")
        return []

    # Shapely works in (x, y) = (lon, lat) order.
    coords = [
        (pt['lon'], pt['lat'])
        for pt in points
        if pt.get('rsrp', -999) >= threshold_dbm
    ]
    if len(coords) < 3:
        logger.debug(f"Not enough points for boundary: {len(coords)}")
        return []

    try:
        # ratio 0.3 balances contour detail against smoothness
        # (0 = convex hull, 1 = tightest fit).
        hull = concave_hull(MultiPoint(coords), ratio=0.3)
        if hull.is_empty:
            logger.debug("Concave hull is empty")
            return []
        if simplify_tolerance > 0:
            hull = hull.simplify(simplify_tolerance, preserve_topology=True)

        # Pick the exterior ring to export, depending on geometry type;
        # for multi-part results keep only the largest polygon.
        ring = None
        kind = hull.geom_type
        if kind == 'Polygon':
            ring = hull.exterior
        elif kind == 'MultiPolygon':
            ring = max(hull.geoms, key=lambda g: g.area).exterior
        elif kind == 'GeometryCollection':
            polys = [g for g in hull.geoms if g.geom_type == 'Polygon']
            if polys:
                ring = max(polys, key=lambda g: g.area).exterior
        if ring is not None:
            return [{'lat': c[1], 'lon': c[0]} for c in ring.coords]

        logger.debug(f"Unexpected hull geometry type: {hull.geom_type}")
        return []
    except Exception as e:
        logger.warning(f"Boundary calculation error: {e}")
        return []
def calculate_multi_site_boundaries(
    points: list[dict],
    threshold_dbm: float = -100,
) -> dict[str, list[dict]]:
    """Calculate separate boundaries for each site's coverage area.

    Args:
        points: Coverage points with 'lat', 'lon', 'rsrp', 'site_id' keys
            (points lacking 'site_id' are grouped under 'default').
        threshold_dbm: RSRP threshold forwarded to the boundary calculation.

    Returns:
        Dict mapping site_id to its boundary coordinate list; sites whose
        boundary could not be computed are omitted.
    """
    # Bucket points per site.
    grouped: dict[str, list[dict]] = {}
    for pt in points:
        grouped.setdefault(pt.get('site_id', 'default'), []).append(pt)

    # Hull each site's cloud independently; drop empty results.
    result: dict[str, list[dict]] = {}
    for sid, members in grouped.items():
        outline = calculate_coverage_boundary(members, threshold_dbm)
        if outline:
            result[sid] = outline
    return result

View File

@@ -62,6 +62,9 @@ from app.services.parallel_coverage_service import (
calculate_coverage_parallel, get_cpu_count, get_parallel_backend, calculate_coverage_parallel, get_cpu_count, get_parallel_backend,
CancellationToken, CancellationToken,
) )
# NOTE: gpu_manager and gpu_service are imported INSIDE functions that need them,
# NOT at module level. This prevents worker processes from initializing CuPy/CUDA
# which causes cudaErrorInsufficientDriver errors in child processes.
# ── New propagation models (Phase 3.0) ── # ── New propagation models (Phase 3.0) ──
from app.propagation.base import PropagationModel, PropagationInput, PropagationOutput from app.propagation.base import PropagationModel, PropagationInput, PropagationOutput
@@ -523,19 +526,33 @@ class CoverageService:
progress_fn("Loading terrain", 0.25) progress_fn("Loading terrain", 0.25)
await asyncio.sleep(0) await asyncio.sleep(0)
t_terrain = time.time() t_terrain = time.time()
# Check for missing tiles before attempting download
radius_km = settings.radius / 1000.0
missing_tiles = self.terrain.get_missing_tiles(site.lat, site.lon, radius_km)
if missing_tiles:
_clog(f"⚠ Missing terrain tiles: {missing_tiles} - will attempt download")
tile_names = await self.terrain.ensure_tiles_for_bbox( tile_names = await self.terrain.ensure_tiles_for_bbox(
min_lat, min_lon, max_lat, max_lon min_lat, min_lon, max_lat, max_lon
) )
for tn in tile_names: for tn in tile_names:
self.terrain._load_tile(tn) self.terrain._load_tile(tn)
# Check what actually loaded
loaded_tiles = [tn for tn in tile_names if tn in self.terrain._tile_cache]
failed_tiles = [tn for tn in tile_names if tn not in self.terrain._tile_cache]
if failed_tiles:
_clog(f"⚠ TERRAIN WARNING: Failed to load tiles {failed_tiles}. "
"Coverage accuracy reduced - using flat terrain for affected areas.")
site_elevation = self.terrain.get_elevation_sync(site.lat, site.lon) site_elevation = self.terrain.get_elevation_sync(site.lat, site.lon)
point_elevations = {} point_elevations = {}
for lat, lon in grid: for lat, lon in grid:
point_elevations[(lat, lon)] = self.terrain.get_elevation_sync(lat, lon) point_elevations[(lat, lon)] = self.terrain.get_elevation_sync(lat, lon)
terrain_time = time.time() - t_terrain terrain_time = time.time() - t_terrain
_clog(f"Tiles: {len(tile_names)}, site elev: {site_elevation:.0f}m, " _clog(f"Tiles: {len(loaded_tiles)}/{len(tile_names)} loaded, site elev: {site_elevation:.0f}m, "
f"pre-computed {len(grid)} elevations") f"pre-computed {len(grid)} elevations")
_clog(f"━━━ PHASE 2 done: {terrain_time:.1f}s ━━━") _clog(f"━━━ PHASE 2 done: {terrain_time:.1f}s ━━━")
@@ -546,8 +563,11 @@ class CoverageService:
from app.services.gpu_service import gpu_service from app.services.gpu_service import gpu_service
t_gpu = time.time() t_gpu = time.time()
grid_lats = np.array([lat for lat, lon in grid]) # Import GPU modules here (main process only) to avoid CUDA context issues in workers
grid_lons = np.array([lon for lat, lon in grid]) from app.services.gpu_backend import gpu_manager
xp = gpu_manager.get_array_module()
grid_lats = xp.array([lat for lat, lon in grid], dtype=xp.float64)
grid_lons = xp.array([lon for lat, lon in grid], dtype=xp.float64)
pre_distances = gpu_service.precompute_distances( pre_distances = gpu_service.precompute_distances(
grid_lats, grid_lons, site.lat, site.lon grid_lats, grid_lons, site.lat, site.lon
@@ -556,6 +576,9 @@ class CoverageService:
pre_distances, site.frequency, site.height, pre_distances, site.frequency, site.height,
environment=getattr(settings, 'environment', 'urban'), environment=getattr(settings, 'environment', 'urban'),
) )
gpu_time = time.time() - t_gpu
backend_name = "GPU (CUDA)" if gpu_manager.gpu_available else "CPU (NumPy)"
_clog(f"Precomputed {len(grid)} distances+path_loss on {backend_name} in {gpu_time:.2f}s")
# Build lookup dict for point loop # Build lookup dict for point loop
precomputed = {} precomputed = {}
@@ -572,6 +595,60 @@ class CoverageService:
f"({len(grid)} points, model={selected_model.name}, freq={site.frequency}MHz, " f"({len(grid)} points, model={selected_model.name}, freq={site.frequency}MHz, "
f"env={env}, backend={'GPU' if gpu_service.available else 'CPU/NumPy'}) ━━━") f"env={env}, backend={'GPU' if gpu_service.available else 'CPU/NumPy'}) ━━━")
# ━━━ PHASE 2.6: GPU-Vectorized Terrain LOS + Diffraction ━━━
# This replaces the per-point LOS calculation in workers
t_batch_terrain = time.time()
grid_elevs = np.array([point_elevations.get((lat, lon), 0.0) for lat, lon in grid])
if settings.use_terrain and gpu_service.available:
_clog("━━━ PHASE 2.6: Batch terrain LOS (GPU) ━━━")
has_los_arr, terrain_loss_arr = gpu_service.batch_terrain_los(
site.lat, site.lon, site.height, site_elevation,
grid_lats.get() if hasattr(grid_lats, 'get') else grid_lats,
grid_lons.get() if hasattr(grid_lons, 'get') else grid_lons,
grid_elevs,
pre_distances,
site.frequency,
self.terrain._tile_cache,
num_samples=30,
)
batch_terrain_time = time.time() - t_batch_terrain
blocked_count = np.sum(~has_los_arr)
_clog(f"━━━ PHASE 2.6 done: {batch_terrain_time:.2f}s "
f"({blocked_count}/{len(grid)} blocked by terrain) ━━━")
# Add terrain results to precomputed dict
for i, (lat, lon) in enumerate(grid):
if (lat, lon) in precomputed:
precomputed[(lat, lon)]['has_los'] = bool(has_los_arr[i])
precomputed[(lat, lon)]['terrain_loss'] = float(terrain_loss_arr[i])
else:
_clog("━━━ PHASE 2.6: Skipped (terrain disabled or no GPU) ━━━")
# Initialize with defaults
for lat, lon in grid:
if (lat, lon) in precomputed:
precomputed[(lat, lon)]['has_los'] = True
precomputed[(lat, lon)]['terrain_loss'] = 0.0
# ━━━ PHASE 2.7: GPU-Vectorized Antenna Pattern ━━━
if site.azimuth is not None and site.beamwidth and gpu_service.available:
t_batch_antenna = time.time()
antenna_loss_arr = gpu_service.batch_antenna_pattern(
site.lat, site.lon,
grid_lats.get() if hasattr(grid_lats, 'get') else grid_lats,
grid_lons.get() if hasattr(grid_lons, 'get') else grid_lons,
site.azimuth,
site.beamwidth,
)
for i, (lat, lon) in enumerate(grid):
if (lat, lon) in precomputed:
precomputed[(lat, lon)]['antenna_loss'] = float(antenna_loss_arr[i])
_clog(f"━━━ PHASE 2.7: Batch antenna pattern done: {time.time() - t_batch_antenna:.2f}s ━━━")
else:
for lat, lon in grid:
if (lat, lon) in precomputed:
precomputed[(lat, lon)]['antenna_loss'] = 0.0
# ━━━ PHASE 3: Point calculation ━━━ # ━━━ PHASE 3: Point calculation ━━━
dominant_path_service._log_count = 0 # Reset diagnostic counter dominant_path_service._log_count = 0 # Reset diagnostic counter
t_points = time.time() t_points = time.time()
@@ -918,9 +995,12 @@ class CoverageService:
await asyncio.sleep(0) await asyncio.sleep(0)
from app.services.gpu_service import gpu_service from app.services.gpu_service import gpu_service
from app.services.gpu_backend import gpu_manager
grid_lats = np.array([lat for lat, _lon in tile_grid]) t_gpu = time.time()
grid_lons = np.array([_lon for _lat, _lon in tile_grid]) xp = gpu_manager.get_array_module()
grid_lats = xp.array([lat for lat, _lon in tile_grid], dtype=xp.float64)
grid_lons = xp.array([_lon for _lat, _lon in tile_grid], dtype=xp.float64)
pre_distances = gpu_service.precompute_distances( pre_distances = gpu_service.precompute_distances(
grid_lats, grid_lons, site.lat, site.lon, grid_lats, grid_lons, site.lat, site.lon,
@@ -929,6 +1009,9 @@ class CoverageService:
pre_distances, site.frequency, site.height, pre_distances, site.frequency, site.height,
environment=getattr(settings, 'environment', 'urban'), environment=getattr(settings, 'environment', 'urban'),
) )
gpu_time = time.time() - t_gpu
backend_name = "GPU (CUDA)" if gpu_manager.gpu_available else "CPU (NumPy)"
_clog(f"Tile {tile_idx+1}: precomputed {len(tile_grid)} pts on {backend_name} in {gpu_time:.2f}s")
precomputed = {} precomputed = {}
for i, (lat, lon) in enumerate(tile_grid): for i, (lat, lon) in enumerate(tile_grid):
@@ -1102,6 +1185,9 @@ class CoverageService:
timing, timing,
precomputed_distance=pre.get('distance') if pre else None, precomputed_distance=pre.get('distance') if pre else None,
precomputed_path_loss=pre.get('path_loss') if pre else None, precomputed_path_loss=pre.get('path_loss') if pre else None,
precomputed_has_los=pre.get('has_los') if pre else None,
precomputed_terrain_loss=pre.get('terrain_loss') if pre else None,
precomputed_antenna_loss=pre.get('antenna_loss') if pre else None,
) )
if point.rsrp >= settings.min_signal: if point.rsrp >= settings.min_signal:
points.append(point) points.append(point)
@@ -1124,6 +1210,9 @@ class CoverageService:
timing: dict, timing: dict,
precomputed_distance: Optional[float] = None, precomputed_distance: Optional[float] = None,
precomputed_path_loss: Optional[float] = None, precomputed_path_loss: Optional[float] = None,
precomputed_has_los: Optional[bool] = None,
precomputed_terrain_loss: Optional[float] = None,
precomputed_antenna_loss: Optional[float] = None,
) -> CoveragePoint: ) -> CoveragePoint:
"""Fully synchronous point calculation. All terrain tiles must be pre-loaded.""" """Fully synchronous point calculation. All terrain tiles must be pre-loaded."""
@@ -1150,29 +1239,37 @@ class CoverageService:
) )
path_loss = model.calculate(prop_input).path_loss_db path_loss = model.calculate(prop_input).path_loss_db
# Antenna pattern # Antenna pattern (use precomputed if available)
antenna_loss = 0.0 if precomputed_antenna_loss is not None:
if site.azimuth is not None and site.beamwidth: antenna_loss = precomputed_antenna_loss
elif site.azimuth is not None and site.beamwidth:
t0 = time.time() t0 = time.time()
antenna_loss = self._antenna_pattern_loss( antenna_loss = self._antenna_pattern_loss(
site.lat, site.lon, lat, lon, site.azimuth, site.beamwidth site.lat, site.lon, lat, lon, site.azimuth, site.beamwidth
) )
timing["antenna"] += time.time() - t0 timing["antenna"] += time.time() - t0
else:
antenna_loss = 0.0
# Terrain LOS (sync) # Terrain LOS (use precomputed if available)
terrain_loss = 0.0 if precomputed_has_los is not None and precomputed_terrain_loss is not None:
has_los = True has_los = precomputed_has_los
if settings.use_terrain: terrain_loss = precomputed_terrain_loss
elif settings.use_terrain:
t0 = time.time() t0 = time.time()
los_result = self.los.check_line_of_sight_sync( los_result = self.los.check_line_of_sight_sync(
site.lat, site.lon, site.height, lat, lon, 1.5 site.lat, site.lon, site.height, lat, lon, 1.5
) )
has_los = los_result["has_los"] has_los = los_result["has_los"]
terrain_loss = 0.0
if not has_los: if not has_los:
terrain_loss = self._diffraction_loss( terrain_loss = self._diffraction_loss(
los_result["clearance"], site.frequency los_result["clearance"], site.frequency
) )
timing["los"] += time.time() - t0 timing["los"] += time.time() - t0
else:
has_los = True
terrain_loss = 0.0
# Building loss (spatial index) # Building loss (spatial index)
building_loss = 0.0 building_loss = 0.0
@@ -1405,14 +1502,18 @@ class CoverageService:
lat2: float, lon2: float lat2: float, lon2: float
) -> float: ) -> float:
"""Calculate bearing from point 1 to point 2 (degrees)""" """Calculate bearing from point 1 to point 2 (degrees)"""
lat1, lon1, lat2, lon2 = map(np.radians, [lat1, lon1, lat2, lon2]) # Use math for scalar operations (faster than numpy/cupy for single values)
lat1_r = math.radians(lat1)
lon1_r = math.radians(lon1)
lat2_r = math.radians(lat2)
lon2_r = math.radians(lon2)
dlon = lon2 - lon1 dlon = lon2_r - lon1_r
x = np.sin(dlon) * np.cos(lat2) x = math.sin(dlon) * math.cos(lat2_r)
y = np.cos(lat1) * np.sin(lat2) - np.sin(lat1) * np.cos(lat2) * np.cos(dlon) y = math.cos(lat1_r) * math.sin(lat2_r) - math.sin(lat1_r) * math.cos(lat2_r) * math.cos(dlon)
bearing = np.degrees(np.arctan2(x, y)) bearing = math.degrees(math.atan2(x, y))
return (bearing + 360) % 360 return (bearing + 360) % 360

View File

@@ -167,6 +167,89 @@ class GPUManager:
for d in self._devices for d in self._devices
] ]
def get_diagnostics(self) -> dict:
    """Collect a full diagnostic snapshot for troubleshooting GPU detection.

    Probes the Python runtime, nvidia-smi, CuPy/CUDA and PyOpenCL in turn.
    Every probe failure is captured inside the returned report instead of
    raising, so this is safe to call on machines with no GPU stack at all.

    Returns:
        dict with keys: python_version, python_executable, platform, is_wsl,
        numpy, cuda, opencl, nvidia_smi, detected_devices, active_backend.
    """
    import sys
    import platform
    import subprocess

    running_under_wsl = "microsoft" in platform.release().lower()
    report = {
        "python_version": sys.version,
        "python_executable": sys.executable,
        "platform": platform.platform(),
        "is_wsl": running_under_wsl,
        "numpy": {"version": np.__version__},
        "cuda": {},
        "opencl": {},
        "nvidia_smi": None,
        "detected_devices": len(self._devices),
        "active_backend": self._active_backend.value,
    }

    # nvidia-smi works even when CuPy is missing, so probe it first.
    try:
        proc = subprocess.run(
            ["nvidia-smi", "--query-gpu=name,memory.total,driver_version", "--format=csv,noheader"],
            capture_output=True, text=True, timeout=5
        )
        if proc.returncode == 0 and proc.stdout.strip():
            report["nvidia_smi"] = proc.stdout.strip()
    except Exception:
        report["nvidia_smi"] = "not found or error"

    # CuPy / CUDA runtime and per-device properties.
    try:
        import cupy as cp
        cuda_info = report["cuda"]
        cuda_info["cupy_version"] = cp.__version__
        cuda_info["cuda_runtime_version"] = cp.cuda.runtime.runtimeGetVersion()
        cuda_info["device_count"] = cp.cuda.runtime.getDeviceCount()
        for dev_idx in range(cuda_info["device_count"]):
            props = cp.cuda.runtime.getDeviceProperties(dev_idx)
            dev_name = props["name"]
            if isinstance(dev_name, bytes):
                dev_name = dev_name.decode()
            cuda_info[f"device_{dev_idx}"] = {
                "name": str(dev_name),
                "memory_mb": props["totalGlobalMem"] // (1024 * 1024),
                "compute_capability": f"{props['major']}.{props['minor']}",
            }
    except ImportError:
        report["cuda"]["error"] = "CuPy not installed"
        if running_under_wsl:
            report["cuda"]["install_hint"] = "pip3 install cupy-cuda12x --break-system-packages"
        else:
            report["cuda"]["install_hint"] = "pip install cupy-cuda12x"
    except Exception as e:
        report["cuda"]["error"] = str(e)

    # PyOpenCL platforms and their devices.
    try:
        import pyopencl as cl
        report["opencl"]["pyopencl_version"] = cl.VERSION_TEXT
        report["opencl"]["platforms"] = []
        for cl_platform in cl.get_platforms():
            entry = {"name": cl_platform.name.strip(), "devices": []}
            for dev in cl_platform.get_devices():
                entry["devices"].append({
                    "name": dev.name.strip(),
                    "type": cl.device_type.to_string(dev.type),
                    "memory_mb": dev.global_mem_size // (1024 * 1024),
                    "compute_units": dev.max_compute_units,
                })
            report["opencl"]["platforms"].append(entry)
    except ImportError:
        report["opencl"]["error"] = "PyOpenCL not installed"
        if running_under_wsl:
            report["opencl"]["install_hint"] = "pip3 install pyopencl --break-system-packages"
        else:
            report["opencl"]["install_hint"] = "pip install pyopencl"
    except Exception as e:
        report["opencl"]["error"] = str(e)

    return report
def set_device(self, backend: str, index: int = 0) -> dict: def set_device(self, backend: str, index: int = 0) -> dict:
"""Switch active compute device.""" """Switch active compute device."""
target_backend = GPUBackend(backend) target_backend = GPUBackend(backend)

View File

@@ -139,6 +139,436 @@ class GPUService:
return _to_cpu(L) return _to_cpu(L)
def batch_terrain_los(
    self,
    site_lat: float,
    site_lon: float,
    site_height: float,
    site_elevation: float,
    grid_lats: np.ndarray,
    grid_lons: np.ndarray,
    grid_elevations: np.ndarray,
    distances: np.ndarray,
    frequency_mhz: float,
    terrain_cache: dict,
    num_samples: int = 30,
) -> tuple[np.ndarray, np.ndarray]:
    """Batch compute terrain LOS and diffraction loss for all grid points.

    Instead of sampling terrain profiles one point at a time, all N profiles
    are sampled in parallel as (N, num_samples) arrays on the active backend
    (GPU via CuPy, or NumPy on CPU).

    Args:
        site_lat, site_lon: Site coordinates.
        site_height: Antenna height above ground (meters).
        site_elevation: Ground elevation at site (meters).
        grid_lats, grid_lons: All grid point coordinates.
        grid_elevations: Ground elevation at each grid point (meters).
        distances: Pre-computed site-to-point distances (meters).
        frequency_mhz: Carrier frequency; currently unused by the
            simplified diffraction approximation below, kept for future use.
        terrain_cache: Dict[tile_name -> numpy array] from terrain_service.
        num_samples: Number of samples per terrain profile.

    Returns:
        (has_los, terrain_loss), both shape (N,):
        has_los: bool array, True where the profile is unobstructed.
        terrain_loss: knife-edge diffraction loss in dB (0 where has_los).
    """
    _xp = gpu_manager.get_array_module()
    N = len(grid_lats)
    if N == 0:
        return np.array([], dtype=bool), np.array([], dtype=np.float64)

    # Move inputs onto the active backend.
    g_lats = _xp.asarray(grid_lats, dtype=_xp.float64)
    g_lons = _xp.asarray(grid_lons, dtype=_xp.float64)
    g_elevs = _xp.asarray(grid_elevations, dtype=_xp.float64)
    g_dists = _xp.asarray(distances, dtype=_xp.float64)

    tx_total = float(site_elevation + site_height)
    rx_height = 1.5  # Receiver height above ground (meters)

    # 4/3 effective Earth radius accounts for standard atmospheric refraction.
    EARTH_RADIUS = 6371000.0
    K_FACTOR = 4.0 / 3.0
    effective_radius = K_FACTOR * EARTH_RADIUS

    # Normalized sample positions along each path: shape (1, S).
    t = _xp.linspace(0, 1, num_samples, dtype=_xp.float64).reshape(1, -1)

    # Interpolated sample coordinates and along-path distances, shape (N, S).
    dlat = g_lats.reshape(-1, 1) - site_lat
    dlon = g_lons.reshape(-1, 1) - site_lon
    sample_lats = site_lat + t * dlat
    sample_lons = site_lon + t * dlon
    sample_dists = t * g_dists.reshape(-1, 1)

    # Terrain elevations come from the CPU-side tile cache, so transfer the
    # sample coordinates back, look them up, then re-upload the results.
    sample_lats_cpu = _to_cpu(sample_lats).flatten()
    sample_lons_cpu = _to_cpu(sample_lons).flatten()
    sample_elevs_cpu = self._batch_elevation_lookup(
        sample_lats_cpu, sample_lons_cpu, terrain_cache
    )
    sample_elevs = _xp.asarray(sample_elevs_cpu, dtype=_xp.float64).reshape(N, num_samples)

    # Straight-line TX->RX height at each sample, minus Earth-curvature bulge.
    rx_total = g_elevs + rx_height
    los_heights = tx_total + t * (rx_total.reshape(-1, 1) - tx_total)
    total_dist = g_dists.reshape(-1, 1)
    d = sample_dists
    curvature = (d * (total_dist - d)) / (2 * effective_radius)
    los_heights_corrected = los_heights - curvature

    # Worst (minimum) clearance along each profile decides LOS.
    clearances = los_heights_corrected - sample_elevs
    min_clearances = _xp.min(clearances, axis=1)
    has_los = min_clearances > 0

    terrain_loss = _xp.zeros(N, dtype=_xp.float64)
    blocked_mask = ~has_los
    if _xp.any(blocked_mask):
        # Simplified Fresnel parameter from the obstruction depth.
        v = _xp.abs(min_clearances[blocked_mask]) / 10.0
        # Piecewise knife-edge approximation (ITU-R P.526 / Lee):
        #   0 <= v < 2.4: 6.02 + 9.11*v - 1.27*v^2
        #   v >= 2.4:     12.95 + 20*log10(v)
        # BUGFIX: the quadratic term was previously +1.65*v**2, which is not
        # the published coefficient and made the curve jump ~17 dB at v=2.4
        # (37.4 dB vs 20.6 dB). With -1.27 the two pieces are continuous.
        loss = _xp.where(
            v <= 0,
            _xp.zeros_like(v),
            _xp.where(
                v < 2.4,
                6.02 + 9.11 * v - 1.27 * v ** 2,
                12.95 + 20 * _xp.log10(v)
            )
        )
        # Cap at a reasonable maximum obstruction loss.
        terrain_loss[blocked_mask] = _xp.minimum(loss, 40.0)

    return _to_cpu(has_los).astype(bool), _to_cpu(terrain_loss)
def _batch_elevation_lookup(
self,
lats: np.ndarray,
lons: np.ndarray,
terrain_cache: dict,
) -> np.ndarray:
"""Look up elevations from cached terrain tiles with bilinear interpolation.
Vectorized implementation: processes per-tile (1-4 tiles) instead of
per-point (thousands of points). Uses bilinear interpolation for
sub-meter accuracy (vs 15m error with nearest-neighbor at 30m resolution).
Args:
lats, lons: Flattened arrays of coordinates
terrain_cache: Dict mapping tile_name -> numpy array
Returns:
elevations: Same shape as input lats
"""
elevations = np.zeros(len(lats), dtype=np.float64)
# Vectorized tile identification
lat_ints = np.floor(lats).astype(int)
lon_ints = np.floor(lons).astype(int)
# Process per tile (usually 1-4 tiles, not per point)
unique_tiles = set(zip(lat_ints, lon_ints))
for lat_int, lon_int in unique_tiles:
lat_letter = 'N' if lat_int >= 0 else 'S'
lon_letter = 'E' if lon_int >= 0 else 'W'
tile_name = f"{lat_letter}{abs(lat_int):02d}{lon_letter}{abs(lon_int):03d}"
tile = terrain_cache.get(tile_name)
if tile is None:
continue
# Mask for points in this tile
mask = (lat_ints == lat_int) & (lon_ints == lon_int)
tile_lats = lats[mask]
tile_lons = lons[mask]
size = tile.shape[0]
# Vectorized bilinear interpolation
lat_frac = tile_lats - lat_int
lon_frac = tile_lons - lon_int
row_exact = (1.0 - lat_frac) * (size - 1)
col_exact = lon_frac * (size - 1)
r0 = np.clip(row_exact.astype(int), 0, size - 2)
c0 = np.clip(col_exact.astype(int), 0, size - 2)
r1 = r0 + 1
c1 = c0 + 1
dr = row_exact - r0
dc = col_exact - c0
# Get four corner values for all points at once
z00 = tile[r0, c0].astype(np.float64)
z01 = tile[r0, c1].astype(np.float64)
z10 = tile[r1, c0].astype(np.float64)
z11 = tile[r1, c1].astype(np.float64)
# Bilinear interpolation (vectorized)
result = (z00 * (1 - dr) * (1 - dc) +
z01 * (1 - dr) * dc +
z10 * dr * (1 - dc) +
z11 * dr * dc)
# Handle void values (-32768) - set to 0
void_mask = (z00 == -32768) | (z01 == -32768) | (z10 == -32768) | (z11 == -32768)
result[void_mask] = 0.0
elevations[mask] = result
return elevations
def batch_antenna_pattern(
    self,
    site_lat: float,
    site_lon: float,
    grid_lats: np.ndarray,
    grid_lons: np.ndarray,
    azimuth: float,
    beamwidth: float,
) -> np.ndarray:
    """Vectorized sector-antenna pattern loss toward every grid point.

    Computes the great-circle bearing from the site to each point, folds it
    against the antenna azimuth, and applies a simplified sector pattern:
    parabolic main lobe (3 dB at the half-beamwidth edge) and a quadratic
    sidelobe roll-off capped at 25 dB.

    Returns:
        Antenna loss in dB, shape (N,), as a CPU array.
    """
    xp = gpu_manager.get_array_module()
    n_points = len(grid_lats)
    if n_points == 0 or azimuth is None or not beamwidth:
        return np.zeros(n_points, dtype=np.float64)

    site_lat_r = xp.radians(xp.float64(site_lat))
    site_lon_r = xp.radians(xp.float64(site_lon))
    pt_lat_r = xp.radians(xp.asarray(grid_lats, dtype=xp.float64))
    pt_lon_r = xp.radians(xp.asarray(grid_lons, dtype=xp.float64))

    # Initial great-circle bearing site -> point, normalized to [0, 360).
    delta_lon = pt_lon_r - site_lon_r
    east = xp.sin(delta_lon) * xp.cos(pt_lat_r)
    north = (xp.cos(site_lat_r) * xp.sin(pt_lat_r)
             - xp.sin(site_lat_r) * xp.cos(pt_lat_r) * xp.cos(delta_lon))
    bearing_deg = (xp.degrees(xp.arctan2(east, north)) + 360) % 360

    # Off-boresight angle folded into [0, 180].
    off_axis = xp.abs(bearing_deg - azimuth)
    off_axis = xp.where(off_axis > 180, 360 - off_axis, off_axis)

    # Parabolic main lobe, quadratic sidelobe capped at 25 dB.
    half_bw = beamwidth / 2
    main_lobe = 3 * (off_axis / half_bw) ** 2
    side_lobe = xp.minimum(3 + 12 * ((off_axis - half_bw) / half_bw) ** 2, 25.0)
    pattern = xp.where(off_axis <= half_bw, main_lobe, side_lobe)
    return _to_cpu(pattern)
def batch_final_rsrp(
    self,
    tx_power: float,
    tx_gain: float,
    path_loss: np.ndarray,
    terrain_loss: np.ndarray,
    antenna_loss: np.ndarray,
    building_loss: np.ndarray,
    vegetation_loss: np.ndarray,
    rain_loss: np.ndarray,
    indoor_loss: np.ndarray,
    atmospheric_loss: np.ndarray,
    reflection_gain: np.ndarray,
    fading_margin: float = 0.0,
) -> np.ndarray:
    """Vectorized final RSRP link budget.

    RSRP = tx_power + tx_gain - path_loss - terrain_loss - antenna_loss
           - building_loss - vegetation_loss - rain_loss - indoor_loss
           - atmospheric_loss + reflection_gain - fading_margin

    All loss/gain arrays are broadcast as float64 on the active backend.

    Returns:
        RSRP in dBm, shape (N,), as a CPU array.
    """
    xp = gpu_manager.get_array_module()

    # Accumulate in the same left-to-right order as the formula above so
    # floating-point results are bit-for-bit reproducible.
    acc = float(tx_power) + float(tx_gain)
    acc = acc - xp.asarray(path_loss, dtype=xp.float64)
    for loss_term in (terrain_loss, antenna_loss, building_loss,
                      vegetation_loss, rain_loss, indoor_loss,
                      atmospheric_loss):
        acc = acc - xp.asarray(loss_term, dtype=xp.float64)
    acc = acc + xp.asarray(reflection_gain, dtype=xp.float64)
    acc = acc - float(fading_margin)
    return _to_cpu(acc)
def calculate_interference(
    self,
    rsrp_grids: list,
    frequencies: list,
) -> tuple:
    """Calculate C/I (carrier-to-interference) ratio for multi-site scenarios.

    For each grid point:
        - C = signal strength from the strongest (serving) cell
        - I = sum of linear powers from all OTHER cells on the SAME frequency
        - C/I = 10*log10(C_linear / I_linear), clipped to [-20, 50] dB

    Args:
        rsrp_grids: List of per-site RSRP arrays in dBm, shape (N,) each.
        frequencies: Per-site carrier frequency (MHz); only co-frequency
            cells contribute interference.

    Returns:
        (ci_ratio, best_server_idx, best_rsrp), all shape (N,):
        ci_ratio: C/I in dB.
        best_server_idx: Index of the serving cell per point (int32).
        best_rsrp: RSRP of the serving cell per point (dBm).
    """
    _xp = gpu_manager.get_array_module()

    if len(rsrp_grids) < 2:
        # Single site — no interferers; report an effectively-infinite C/I.
        if rsrp_grids:
            n_points = len(rsrp_grids[0])
            return (
                np.full(n_points, 50.0, dtype=np.float64),  # 50 dB = effectively no interference
                np.zeros(n_points, dtype=np.int32),
                np.array(rsrp_grids[0], dtype=np.float64),
            )
        return np.array([]), np.array([]), np.array([])

    # Stack RSRP grids: shape (num_sites, num_points)
    rsrp_stack = _xp.stack([_xp.asarray(g, dtype=_xp.float64) for g in rsrp_grids], axis=0)

    # Linear power (mW) and serving cell per point.
    rsrp_linear = _xp.power(10.0, rsrp_stack / 10.0)
    best_server_idx = _xp.argmax(rsrp_stack, axis=0)
    best_rsrp = _xp.take_along_axis(rsrp_stack, best_server_idx[_xp.newaxis, :], axis=0)[0]
    best_rsrp_linear = _xp.take_along_axis(rsrp_linear, best_server_idx[_xp.newaxis, :], axis=0)[0]

    # Co-channel mask: freq_match[i, j] is True when sites i and j share a
    # frequency. Selecting column best_server_idx[p] for every point yields
    # a (num_sites, num_points) mask of server-plus-interferers.
    # PERF: this replaces a per-point Python loop that forced a host/device
    # sync (_to_cpu) on every single point — the whole C/I now runs as a
    # few array ops on the active backend.
    freq_array = _xp.asarray(frequencies, dtype=_xp.float64)
    freq_match = freq_array[:, _xp.newaxis] == freq_array[_xp.newaxis, :]
    co_channel_power = _xp.sum(rsrp_linear * freq_match[:, best_server_idx], axis=0)
    # The serving cell's own power is inside the co-channel sum; remove it.
    interference_linear = co_channel_power - best_rsrp_linear

    # C/I in dB; epsilon avoids log10(0) when there are no co-channel cells.
    epsilon = 1e-30
    ci_ratio = 10 * _xp.log10(best_rsrp_linear / (interference_linear + epsilon))
    # Clip to a reasonable range (-20 to 50 dB).
    ci_ratio = _xp.clip(ci_ratio, -20, 50)

    return (
        _to_cpu(ci_ratio),
        _to_cpu(best_server_idx).astype(np.int32),
        _to_cpu(best_rsrp),
    )
def calculate_interference_vectorized(
    self,
    rsrp_grids: list,
    frequencies: list,
) -> tuple:
    """Fully vectorized C/I calculation (no per-point Python loops).

    Matches calculate_interference: for each point the interference is the
    sum of linear powers from every OTHER site that shares the serving
    cell's frequency.

    Args:
        rsrp_grids: List of per-site RSRP arrays in dBm, shape (N,) each.
        frequencies: Per-site carrier frequency (MHz).

    Returns:
        (ci_ratio, best_server_idx, best_rsrp), all shape (N,).
    """
    _xp = gpu_manager.get_array_module()

    if len(rsrp_grids) < 2:
        # Single site — no interferers; report an effectively-infinite C/I.
        if rsrp_grids:
            n_points = len(rsrp_grids[0])
            return (
                np.full(n_points, 50.0, dtype=np.float64),
                np.zeros(n_points, dtype=np.int32),
                np.array(rsrp_grids[0], dtype=np.float64),
            )
        return np.array([]), np.array([]), np.array([])

    # Stack RSRP grids: shape (num_sites, num_points)
    rsrp_stack = _xp.stack([_xp.asarray(g, dtype=_xp.float64) for g in rsrp_grids], axis=0)

    # Linear power (mW) and serving cell per point.
    rsrp_linear = _xp.power(10.0, rsrp_stack / 10.0)
    best_server_idx = _xp.argmax(rsrp_stack, axis=0)
    best_rsrp = _xp.take_along_axis(rsrp_stack, best_server_idx[_xp.newaxis, :], axis=0)[0]
    best_rsrp_linear = _xp.take_along_axis(rsrp_linear, best_server_idx[_xp.newaxis, :], axis=0)[0]

    # Frequency match matrix: (num_sites, num_sites).
    # BUGFIX: freq_match was computed but never applied — interference
    # previously summed power from ALL sites regardless of frequency,
    # contradicting the "same as calculate_interference" contract.
    # Selecting the serving site's column per point restricts the sum to
    # co-channel cells only.
    freq_array = _xp.asarray(frequencies, dtype=_xp.float64)
    freq_match = freq_array[:, _xp.newaxis] == freq_array[_xp.newaxis, :]
    co_channel_power = _xp.sum(rsrp_linear * freq_match[:, best_server_idx], axis=0)
    # The serving cell's own power is inside the co-channel sum; remove it.
    interference_linear = co_channel_power - best_rsrp_linear

    # C/I in dB; epsilon avoids log10(0) when there are no co-channel cells.
    epsilon = 1e-30
    ci_ratio = 10 * _xp.log10(best_rsrp_linear / (interference_linear + epsilon))
    # Clip to a reasonable range.
    ci_ratio = _xp.clip(ci_ratio, -20, 50)

    return (
        _to_cpu(ci_ratio),
        _to_cpu(best_server_idx).astype(np.int32),
        _to_cpu(best_rsrp),
    )
# Singleton # Singleton
gpu_service = GPUService() gpu_service = GPUService()

View File

@@ -226,6 +226,9 @@ def _ray_process_chunk_impl(chunk, terrain_cache, buildings, osm_data, config):
config['site_elevation'], point_elev, timing, config['site_elevation'], point_elev, timing,
precomputed_distance=pre.get('distance') if pre else None, precomputed_distance=pre.get('distance') if pre else None,
precomputed_path_loss=pre.get('path_loss') if pre else None, precomputed_path_loss=pre.get('path_loss') if pre else None,
precomputed_has_los=pre.get('has_los') if pre else None,
precomputed_terrain_loss=pre.get('terrain_loss') if pre else None,
precomputed_antenna_loss=pre.get('antenna_loss') if pre else None,
) )
if point.rsrp >= settings.min_signal: if point.rsrp >= settings.min_signal:
results.append(point.model_dump()) results.append(point.model_dump())
@@ -535,6 +538,9 @@ def _pool_worker_process_chunk(args):
config['site_elevation'], point_elev, timing, config['site_elevation'], point_elev, timing,
precomputed_distance=pre.get('distance') if pre else None, precomputed_distance=pre.get('distance') if pre else None,
precomputed_path_loss=pre.get('path_loss') if pre else None, precomputed_path_loss=pre.get('path_loss') if pre else None,
precomputed_has_los=pre.get('has_los') if pre else None,
precomputed_terrain_loss=pre.get('terrain_loss') if pre else None,
precomputed_antenna_loss=pre.get('antenna_loss') if pre else None,
) )
if point.rsrp >= settings.min_signal: if point.rsrp >= settings.min_signal:
results.append(point.model_dump()) results.append(point.model_dump())
@@ -654,6 +660,9 @@ def _pool_worker_shm_chunk(args):
config['site_elevation'], point_elev, timing, config['site_elevation'], point_elev, timing,
precomputed_distance=pre.get('distance') if pre else None, precomputed_distance=pre.get('distance') if pre else None,
precomputed_path_loss=pre.get('path_loss') if pre else None, precomputed_path_loss=pre.get('path_loss') if pre else None,
precomputed_has_los=pre.get('has_los') if pre else None,
precomputed_terrain_loss=pre.get('terrain_loss') if pre else None,
precomputed_antenna_loss=pre.get('antenna_loss') if pre else None,
) )
if point.rsrp >= settings.min_signal: if point.rsrp >= settings.min_signal:
results.append(point.model_dump()) results.append(point.model_dump())
@@ -816,6 +825,9 @@ def _pool_worker_shm_shared(args):
site_elev, point_elev, timing, site_elev, point_elev, timing,
precomputed_distance=pre.get('distance') if pre else None, precomputed_distance=pre.get('distance') if pre else None,
precomputed_path_loss=pre.get('path_loss') if pre else None, precomputed_path_loss=pre.get('path_loss') if pre else None,
precomputed_has_los=pre.get('has_los') if pre else None,
precomputed_terrain_loss=pre.get('terrain_loss') if pre else None,
precomputed_antenna_loss=pre.get('antenna_loss') if pre else None,
) )
if i < 3: if i < 3:
@@ -1134,6 +1146,9 @@ def _calculate_sequential(
site_elevation, point_elev, timing, site_elevation, point_elev, timing,
precomputed_distance=pre.get('distance') if pre else None, precomputed_distance=pre.get('distance') if pre else None,
precomputed_path_loss=pre.get('path_loss') if pre else None, precomputed_path_loss=pre.get('path_loss') if pre else None,
precomputed_has_los=pre.get('has_los') if pre else None,
precomputed_terrain_loss=pre.get('terrain_loss') if pre else None,
precomputed_antenna_loss=pre.get('antenna_loss') if pre else None,
) )
if point.rsrp >= settings.min_signal: if point.rsrp >= settings.min_signal:
results.append(point.model_dump()) results.append(point.model_dump())

View File

@@ -20,8 +20,24 @@ class TerrainService:
""" """
SRTM_SOURCES = [ SRTM_SOURCES = [
"https://elevation-tiles-prod.s3.amazonaws.com/skadi/{lat_dir}/{tile_name}.hgt.gz", # Our tile server — SRTM1 (30m) preferred, uncompressed
"https://s3.amazonaws.com/elevation-tiles-prod/skadi/{lat_dir}/{tile_name}.hgt.gz", {
"url": "https://terra.eliah.one/srtm1/{tile_name}.hgt",
"compressed": False,
"resolution": "srtm1",
},
# Our tile server — SRTM3 (90m) fallback
{
"url": "https://terra.eliah.one/srtm3/{tile_name}.hgt",
"compressed": False,
"resolution": "srtm3",
},
# Public AWS mirror — SRTM1, gzip compressed
{
"url": "https://elevation-tiles-prod.s3.amazonaws.com/skadi/{lat_dir}/{tile_name}.hgt.gz",
"compressed": True,
"resolution": "srtm1",
},
] ]
def __init__(self): def __init__(self):
@@ -48,7 +64,7 @@ class TerrainService:
return self.terrain_path / f"{tile_name}.hgt" return self.terrain_path / f"{tile_name}.hgt"
async def download_tile(self, tile_name: str) -> bool: async def download_tile(self, tile_name: str) -> bool:
"""Download SRTM tile if not cached locally""" """Download SRTM tile from configured sources, preferring highest resolution."""
tile_path = self.get_tile_path(tile_name) tile_path = self.get_tile_path(tile_name)
if tile_path.exists(): if tile_path.exists():
@@ -56,33 +72,45 @@ class TerrainService:
lat_dir = tile_name[:3] # e.g., "N48" lat_dir = tile_name[:3] # e.g., "N48"
async with httpx.AsyncClient(timeout=60.0) as client: async with httpx.AsyncClient(timeout=60.0, follow_redirects=True) as client:
for source_url in self.SRTM_SOURCES: for source in self.SRTM_SOURCES:
url = source_url.format(lat_dir=lat_dir, tile_name=tile_name) url = source["url"].format(lat_dir=lat_dir, tile_name=tile_name)
try: try:
response = await client.get(url) response = await client.get(url)
if response.status_code == 200: if response.status_code == 200:
data = response.content data = response.content
if url.endswith('.gz'): # Skip empty responses
data = gzip.decompress(data) if len(data) < 1000:
elif url.endswith('.zip'): continue
with zipfile.ZipFile(io.BytesIO(data)) as zf:
for name in zf.namelist(): if source["compressed"]:
if name.endswith('.hgt'): if url.endswith('.gz'):
data = zf.read(name) data = gzip.decompress(data)
break elif url.endswith('.zip'):
with zipfile.ZipFile(io.BytesIO(data)) as zf:
for name in zf.namelist():
if name.endswith('.hgt'):
data = zf.read(name)
break
# Validate tile size (SRTM1: 25,934,402 bytes, SRTM3: 2,884,802 bytes)
if len(data) not in (3601 * 3601 * 2, 1201 * 1201 * 2):
print(f"[Terrain] Invalid tile size {len(data)} from {url}")
continue
tile_path.write_bytes(data) tile_path.write_bytes(data)
print(f"[Terrain] Downloaded {tile_name} ({len(data)} bytes)") res = source["resolution"]
size_mb = len(data) / 1048576
print(f"[Terrain] Downloaded {tile_name} ({res}, {size_mb:.1f} MB)")
return True return True
except Exception as e: except Exception as e:
print(f"[Terrain] Failed from {url}: {e}") print(f"[Terrain] Failed from {url}: {e}")
continue continue
print(f"[Terrain] Could not download {tile_name}") print(f"[Terrain] Could not download {tile_name} from any source")
return False return False
def _load_tile(self, tile_name: str) -> Optional[np.ndarray]: def _load_tile(self, tile_name: str) -> Optional[np.ndarray]:
@@ -149,56 +177,179 @@ class TerrainService:
return self._load_tile(tile_name) return self._load_tile(tile_name)
def _bilinear_sample(self, tile: np.ndarray, lat: float, lon: float) -> float:
"""Sample elevation with bilinear interpolation for sub-meter accuracy.
SRTM1 at 30m means nearest-neighbor can have 15m positional error.
Bilinear interpolation reduces this to sub-meter accuracy.
"""
size = tile.shape[0]
# Tile southwest corner
lat_int = int(lat) if lat >= 0 else int(lat) - 1
lon_int = int(lon) if lon >= 0 else int(lon) - 1
# Fractional position within tile (0.0 to 1.0)
lat_frac = lat - lat_int # 0 = south edge, 1 = north edge
lon_frac = lon - lon_int # 0 = west edge, 1 = east edge
# Convert to row/col (note: rows go north to south!)
row_exact = (1.0 - lat_frac) * (size - 1) # 0 = north, size-1 = south
col_exact = lon_frac * (size - 1) # 0 = west, size-1 = east
# Four surrounding grid points
r0 = int(row_exact)
c0 = int(col_exact)
r1 = min(r0 + 1, size - 1)
c1 = min(c0 + 1, size - 1)
# Fractional position between grid points
dr = row_exact - r0
dc = col_exact - c0
# Get four corner values
z00 = tile[r0, c0]
z01 = tile[r0, c1]
z10 = tile[r1, c0]
z11 = tile[r1, c1]
# Handle void (-32768) values - fall back to nearest valid
void_val = -32768
corners = [(z00, r0, c0), (z01, r0, c1), (z10, r1, c0), (z11, r1, c1)]
if z00 == void_val or z01 == void_val or z10 == void_val or z11 == void_val:
valid = [(z, r, c) for z, r, c in corners if z != void_val]
if not valid:
return 0.0
# Return nearest valid value
return float(valid[0][0])
# Bilinear interpolation
elevation = (z00 * (1 - dr) * (1 - dc) +
z01 * (1 - dr) * dc +
z10 * dr * (1 - dc) +
z11 * dr * dc)
return float(elevation)
async def get_elevation(self, lat: float, lon: float) -> float: async def get_elevation(self, lat: float, lon: float) -> float:
"""Get elevation at specific coordinate (meters above sea level)""" """Get elevation at specific coordinate with bilinear interpolation."""
tile_name = self.get_tile_name(lat, lon) tile_name = self.get_tile_name(lat, lon)
tile = await self.load_tile(tile_name) tile = await self.load_tile(tile_name)
if tile is None: if tile is None:
return 0.0 return 0.0
size = tile.shape[0] return self._bilinear_sample(tile, lat, lon)
# Calculate position within tile
lat_int = int(lat) if lat >= 0 else int(lat) - 1
lon_int = int(lon) if lon >= 0 else int(lon) - 1
lat_frac = lat - lat_int
lon_frac = lon - lon_int
# Row 0 = north edge, last row = south edge
row = int((1 - lat_frac) * (size - 1))
col = int(lon_frac * (size - 1))
row = max(0, min(row, size - 1))
col = max(0, min(col, size - 1))
elevation = tile[row, col]
# -32768 = void/no data
if elevation == -32768:
return 0.0
return float(elevation)
def get_elevation_sync(self, lat: float, lon: float) -> float: def get_elevation_sync(self, lat: float, lon: float) -> float:
"""Sync elevation lookup from memory cache. Returns 0.0 if tile not loaded.""" """Sync elevation lookup with bilinear interpolation. Returns 0.0 if tile not loaded."""
tile_name = self.get_tile_name(lat, lon) tile_name = self.get_tile_name(lat, lon)
tile = self._tile_cache.get(tile_name) tile = self._tile_cache.get(tile_name)
if tile is None: if tile is None:
return 0.0 return 0.0
size = tile.shape[0] return self._bilinear_sample(tile, lat, lon)
lat_int = int(lat) if lat >= 0 else int(lat) - 1
lon_int = int(lon) if lon >= 0 else int(lon) - 1
def get_elevations_batch(self, lats: np.ndarray, lons: np.ndarray) -> np.ndarray:
    """Vectorized elevation lookup with bilinear interpolation.

    Groups the query points by their 1-degree SRTM tile and interpolates
    each group with a single NumPy pass, so the cost scales with the number
    of tiles touched (usually 1-4), not the number of points. Tiles must be
    pre-loaded into the in-memory cache.

    Args:
        lats: Array of latitudes.
        lons: Array of longitudes.

    Returns:
        float32 array of elevations (0.0 for missing tiles or void data).
    """
    result = np.zeros(len(lats), dtype=np.float32)

    # Integer tile coordinates (floor handles southern/western hemispheres).
    tile_lat_idx = np.floor(lats).astype(int)
    tile_lon_idx = np.floor(lons).astype(int)

    for la, lo in {(a, b) for a, b in zip(tile_lat_idx, tile_lon_idx)}:
        ns = 'N' if la >= 0 else 'S'
        ew = 'E' if lo >= 0 else 'W'
        tile = self._tile_cache.get(f"{ns}{abs(la):02d}{ew}{abs(lo):03d}")
        if tile is None:
            continue

        # Select only the points falling inside this tile.
        sel = (tile_lat_idx == la) & (tile_lon_idx == lo)
        size = tile.shape[0]

        # Fractional row/col inside the tile. Row 0 is the tile's north
        # edge, hence the (1 - frac) flip on latitude.
        row_f = (1.0 - (lats[sel] - la)) * (size - 1)
        col_f = (lons[sel] - lo) * (size - 1)

        top = np.clip(row_f.astype(int), 0, size - 2)
        left = np.clip(col_f.astype(int), 0, size - 2)
        wr = row_f - top
        wc = col_f - left

        # Four surrounding cells for every selected point at once.
        nw = tile[top, left].astype(np.float32)
        ne = tile[top, left + 1].astype(np.float32)
        sw = tile[top + 1, left].astype(np.float32)
        se = tile[top + 1, left + 1].astype(np.float32)

        vals = (nw * (1 - wr) * (1 - wc)
                + ne * (1 - wr) * wc
                + sw * wr * (1 - wc)
                + se * wr * wc)

        # Any void corner (-32768) invalidates the interpolation.
        vals[(nw == -32768) | (ne == -32768) | (sw == -32768) | (se == -32768)] = 0.0
        result[sel] = vals

    return result
def get_required_tiles(self, center_lat: float, center_lon: float, radius_km: float) -> list:
    """Return the names of all 1-degree SRTM tiles covering a circular area.

    The circle is approximated by its bounding box in degrees, so a few
    extra corner tiles may be included.

    Args:
        center_lat: Latitude of the circle center (degrees).
        center_lon: Longitude of the circle center (degrees).
        radius_km: Coverage radius in kilometres.

    Returns:
        List of tile names like "N48E035".
    """
    # ~111 km per degree of latitude; longitude degrees shrink with cos(lat)
    dlat = radius_km / 111.0
    dlon = radius_km / (111.0 * np.cos(np.radians(center_lat)))

    first_lat = int(np.floor(center_lat - dlat))
    last_lat = int(np.floor(center_lat + dlat))
    first_lon = int(np.floor(center_lon - dlon))
    last_lon = int(np.floor(center_lon + dlon))

    names = []
    for tile_lat in range(first_lat, last_lat + 1):
        ns = 'N' if tile_lat >= 0 else 'S'
        for tile_lon in range(first_lon, last_lon + 1):
            ew = 'E' if tile_lon >= 0 else 'W'
            names.append(f"{ns}{abs(tile_lat):02d}{ew}{abs(tile_lon):03d}")
    return names
def get_missing_tiles(self, center_lat: float, center_lon: float, radius_km: float) -> list:
    """Return names of required tiles whose data files are absent locally.

    Args:
        center_lat: Latitude of the coverage center (degrees).
        center_lon: Longitude of the coverage center (degrees).
        radius_km: Coverage radius in kilometres.

    Returns:
        Tile names (same format as get_required_tiles) with no file on disk.
    """
    missing = []
    for name in self.get_required_tiles(center_lat, center_lon, radius_km):
        if not self.get_tile_path(name).exists():
            missing.append(name)
    return missing
async def get_elevation_profile( async def get_elevation_profile(
self, self,

View File

@@ -21,6 +21,11 @@ class VegetationArea(BaseModel):
geometry: List[Tuple[float, float]] # [(lon, lat), ...] geometry: List[Tuple[float, float]] # [(lon, lat), ...]
vegetation_type: str # forest, wood, scrub, orchard vegetation_type: str # forest, wood, scrub, orchard
density: str # dense, sparse, mixed density: str # dense, sparse, mixed
# Bounding box for fast rejection (computed from geometry)
min_lat: float = 0.0
max_lat: float = 0.0
min_lon: float = 0.0
max_lon: float = 0.0
class VegetationCache: class VegetationCache:
@@ -127,7 +132,24 @@ class VegetationService:
cached = self.cache.get(min_lat, min_lon, max_lat, max_lon) cached = self.cache.get(min_lat, min_lon, max_lat, max_lon)
if cached is not None: if cached is not None:
print(f"[Vegetation] Cache hit for bbox") print(f"[Vegetation] Cache hit for bbox")
areas = [VegetationArea(**v) for v in cached] areas = []
for v in cached:
area = VegetationArea(**v)
# Recompute bbox if missing (backward compat with old cache)
if area.min_lat == 0.0 and area.max_lat == 0.0 and area.geometry:
lons = [p[0] for p in area.geometry]
lats = [p[1] for p in area.geometry]
area = VegetationArea(
id=area.id,
geometry=area.geometry,
vegetation_type=area.vegetation_type,
density=area.density,
min_lat=min(lats),
max_lat=max(lats),
min_lon=min(lons),
max_lon=max(lons),
)
areas.append(area)
self._memory_cache[cache_key] = areas self._memory_cache[cache_key] = areas
return areas return areas
@@ -205,11 +227,19 @@ class VegetationService:
leaf_type = tags.get("leaf_type", "mixed") leaf_type = tags.get("leaf_type", "mixed")
density = "dense" if leaf_type == "needleleaved" else "mixed" density = "dense" if leaf_type == "needleleaved" else "mixed"
# Compute bounding box from geometry (lon, lat tuples)
lons = [p[0] for p in geometry]
lats = [p[1] for p in geometry]
areas.append(VegetationArea( areas.append(VegetationArea(
id=element["id"], id=element["id"],
geometry=geometry, geometry=geometry,
vegetation_type=veg_type, vegetation_type=veg_type,
density=density density=density,
min_lat=min(lats),
max_lat=max(lats),
min_lon=min(lons),
max_lon=max(lons),
)) ))
return areas return areas
@@ -260,8 +290,12 @@ class VegetationService:
lat: float, lon: float, lat: float, lon: float,
areas: List[VegetationArea] areas: List[VegetationArea]
) -> Optional[VegetationArea]: ) -> Optional[VegetationArea]:
"""Check if point is in vegetation area""" """Check if point is in vegetation area (with bbox pre-filter)"""
for area in areas: for area in areas:
# Quick bbox reject - skips 95%+ of polygons
if not (area.min_lat <= lat <= area.max_lat and
area.min_lon <= lon <= area.max_lon):
continue
if self._point_in_polygon(lat, lon, area.geometry): if self._point_in_polygon(lat, lon, area.geometry):
return area return area
return None return None

View File

@@ -0,0 +1,8 @@
# Development and testing dependencies
# Install with: pip install -r requirements-dev.txt
pytest>=7.0.0
pytest-asyncio>=0.21.0
httpx>=0.27.0
ruff>=0.1.0
mypy>=1.7.0

View File

@@ -0,0 +1,10 @@
# NVIDIA GPU acceleration via CuPy
# Install with: pip install -r requirements-gpu-nvidia.txt
#
# Choose ONE based on your CUDA version:
# - cupy-cuda12x for CUDA 12.x (RTX 30xx, 40xx, newer)
# - cupy-cuda11x for CUDA 11.x (older cards)
#
# CuPy bundles CUDA runtime (~700 MB) - no separate CUDA install needed
cupy-cuda12x>=13.0.0

View File

@@ -0,0 +1,14 @@
# Intel/AMD GPU acceleration via PyOpenCL
# Install with: pip install -r requirements-gpu-opencl.txt
#
# Works with:
# - Intel UHD/Iris Graphics (integrated)
# - AMD Radeon (discrete)
# - NVIDIA GPUs (alternative to CUDA)
#
# Requires OpenCL runtime:
# - Intel: Intel GPU Computing Runtime
# - AMD: AMD Adrenalin driver (includes OpenCL)
# - NVIDIA: NVIDIA driver (includes OpenCL)
pyopencl>=2023.1

View File

@@ -7,6 +7,7 @@ pymongo==4.6.1
pydantic-settings==2.1.0 pydantic-settings==2.1.0
numpy==1.26.4 numpy==1.26.4
scipy==1.12.0 scipy==1.12.0
shapely>=2.0.0
requests==2.31.0 requests==2.31.0
httpx==0.27.0 httpx==0.27.0
aiosqlite>=0.19.0 aiosqlite>=0.19.0

View File

@@ -29,7 +29,23 @@ if getattr(sys, 'frozen', False):
print(f"[RFCP] Frozen mode, base dir: {base_dir}", flush=True) print(f"[RFCP] Frozen mode, base dir: {base_dir}", flush=True)
# Fix uvicorn TTY detection — redirect None streams to a log file # Fix uvicorn TTY detection — redirect None streams to a log file
log_path = os.path.join(base_dir, 'rfcp-server.log') # Use RFCP_LOG_PATH from Electron, or fallback to user-writable location
log_dir = os.environ.get('RFCP_LOG_PATH')
if not log_dir:
if sys.platform == 'win32':
appdata = os.environ.get('APPDATA', os.path.expanduser('~'))
log_dir = os.path.join(appdata, 'rfcp-desktop', 'logs')
else:
log_dir = os.path.join(os.path.expanduser('~'), '.rfcp', 'logs')
try:
os.makedirs(log_dir, exist_ok=True)
log_path = os.path.join(log_dir, 'rfcp-server.log')
except Exception:
# Fallback to temp directory if all else fails
import tempfile
log_path = os.path.join(tempfile.gettempdir(), 'rfcp-server.log')
log_file = open(log_path, 'w') log_file = open(log_path, 'w')
if sys.stdout is None: if sys.stdout is None:
sys.stdout = log_file sys.stdout = log_file

View File

@@ -52,9 +52,11 @@ const getLogPath = () => {
const getBackendExePath = () => { const getBackendExePath = () => {
const exeName = process.platform === 'win32' ? 'rfcp-server.exe' : 'rfcp-server'; const exeName = process.platform === 'win32' ? 'rfcp-server.exe' : 'rfcp-server';
if (isDev) { if (isDev) {
return path.join(__dirname, '..', 'backend', exeName); // Dev: use the ONEDIR build output
return path.join(__dirname, '..', 'backend', 'dist', 'rfcp-server', exeName);
} }
return getResourcePath('backend', exeName); // Production: ONEDIR structure - backend/rfcp-server/rfcp-server.exe
return getResourcePath('backend', 'rfcp-server', exeName);
}; };
/** Frontend index.html path (production only) */ /** Frontend index.html path (production only) */

View File

@@ -0,0 +1,233 @@
# RFCP Native Backend Research
## Executive Summary
**Finding:** The production Electron app already supports native Windows operation without WSL2.
The production build uses PyInstaller to bundle the Python backend as a standalone Windows executable (`rfcp-server.exe`). WSL2 is only used during development. No migration is needed for end users.
---
## Current Architecture
### Development Mode
```
RFCP (Electron dev)
└── Spawns: python -m uvicorn app.main:app --host 127.0.0.1 --port 8090
└── Uses system Python (Windows or WSL2)
└── Requires venv with dependencies
```
### Production Mode (Already Implemented)
```
RFCP.exe (Electron packaged)
└── Spawns: rfcp-server.exe (bundled PyInstaller binary)
└── Self-contained Python + all dependencies
└── No WSL2 required
└── No system Python required
```
---
## Evidence from Codebase
### desktop/main.js (Lines 120-145)
```javascript
function startBackend() {
// Production: use bundled executable
if (isProduction) {
const serverPath = path.join(process.resourcesPath, 'rfcp-server.exe');
if (fs.existsSync(serverPath)) {
backendProcess = spawn(serverPath, [], { ... });
return;
}
}
// Development: use system Python
backendProcess = spawn('python', ['-m', 'uvicorn', 'app.main:app', ...]);
}
```
### installer/rfcp-server.spec (PyInstaller Config)
```python
# Key configuration
a = Analysis(
['run_server.py'],
pathex=[backend_path],
binaries=[],
datas=[
('data/terrain', 'data/terrain'), # Terrain data bundled
],
hiddenimports=[
'uvicorn.logging', 'uvicorn.loops', 'uvicorn.protocols',
'motor', 'pymongo', 'numpy', 'scipy', 'shapely',
# Full list of dependencies
],
)
exe = EXE(
pyz,
a.scripts,
name='rfcp-server',
console=True, # Shows console for debugging
icon='rfcp.ico',
)
```
---
## GPU Acceleration in Production
### Current Status
The PyInstaller bundle **does not include CuPy** by default because:
1. CuPy requires CUDA runtime (large, ~500MB)
2. Not all users have NVIDIA GPUs
3. Binary would be too large for distribution
### Solution Options
**Option A: Ship CPU-only (Current)**
- Production build uses NumPy (CPU) for calculations
- GPU acceleration available only in dev mode or manual install
- Smallest download size (~100MB)
**Option B: Separate GPU Installer**
- Main installer: CPU-only (~100MB)
- Optional GPU addon: Downloads CuPy + CUDA runtime (~600MB)
- Implemented via install_rfcp.py dependency installer
**Option C: CUDA Toolkit Detection**
- Detect if CUDA is already installed on user's system
- If yes, attempt to load CuPy dynamically
- Graceful fallback to NumPy if not available
### Recommendation
Keep Option A (CPU-only production) with Option B available for power users:
1. Default production build works everywhere
2. Users with NVIDIA GPUs can run `install_rfcp.py` to enable GPU acceleration
3. No WSL2 required for either path
---
## Terrain Data Handling
### Current Implementation
Terrain data (SRTM .hgt files) is bundled inside the PyInstaller executable:
```python
datas=[
('data/terrain', 'data/terrain'),
]
```
### Considerations
- Bundled terrain data increases exe size significantly
- Alternative: Download terrain on first use (like current region download system)
- For initial release, bundling common regions is acceptable
---
## Database (MongoDB)
### Production Architecture
The Electron app embeds MongoDB or requires MongoDB to be installed separately.
Options:
1. **Embedded MongoDB** - Ships mongod.exe with the app
2. **MongoDB Atlas** - Cloud database (requires internet)
3. **SQLite** - Switch to file-based database (significant refactor)
4. **In-memory + file persistence** - No MongoDB required (significant refactor)
Current implementation uses Motor (async MongoDB driver). For true standalone operation, consider SQLite migration in future iteration.
---
## Build Process
### Current Build Commands
```bash
# Build backend executable
cd /mnt/d/root/rfcp/backend
pyinstaller ../installer/rfcp-server.spec
# Build Electron app with bundled backend
cd /mnt/d/root/rfcp/installer
./build-win.sh
```
### Output
- `rfcp-server.exe` - Standalone backend (~80MB)
- `RFCP-Setup-{version}.exe` - Full installer with Electron + backend (~150MB)
---
## Testing Native Build
### Test Procedure
1. Build `rfcp-server.exe` via PyInstaller
2. Run directly: `./rfcp-server.exe`
3. Verify API responds: `curl http://localhost:8090/api/health`
4. Verify coverage calculation works
5. Check GPU detection in logs
### Known Issues
1. **First launch slow**: PyInstaller extracts on first run (~5-10 seconds)
2. **Antivirus false positives**: Some AV flags PyInstaller executables
3. **Console window**: Shows black console (use `console=False` for windowless)
---
## Conclusions
### No Migration Needed
The production Electron app already works without WSL2. The current architecture is:
- ✅ Native Windows executable
- ✅ No Python installation required
- ✅ No WSL2 required
- ✅ Self-contained dependencies
### Development vs Production
| Aspect | Development | Production |
|--------|-------------|------------|
| Python | System Python / venv | Bundled via PyInstaller |
| WSL2 | Optional (for testing) | Not required |
| GPU | CuPy if installed | CPU-only (NumPy) |
| MongoDB | Local instance | Embedded or Atlas |
| Terrain | Local data/ folder | Bundled in exe |
### Remaining Work
1. **GPU for production**: Implement Optional GPU addon installer
2. **Smaller package**: On-demand terrain download instead of bundling
3. **Database portability**: Consider SQLite migration for offline-first
4. **Installer polish**: Signed executables, auto-update support
---
## Appendix: Full PyInstaller Hidden Imports
From `installer/rfcp-server.spec`:
```python
hiddenimports=[
'uvicorn.logging',
'uvicorn.loops',
'uvicorn.loops.auto',
'uvicorn.protocols',
'uvicorn.protocols.http',
'uvicorn.protocols.http.auto',
'uvicorn.protocols.websockets',
'uvicorn.protocols.websockets.auto',
'uvicorn.lifespan',
'uvicorn.lifespan.on',
'motor',
'pymongo',
'numpy',
'scipy',
'shapely',
'shapely.geometry',
'shapely.ops',
# ... additional imports
]
```

View File

@@ -0,0 +1,463 @@
# RFCP — Iteration 3.10: Link Budget, Fresnel Zone & Interference Modeling
## Overview
Add three interconnected RF analysis features: link budget calculator panel, Fresnel zone visualization on terrain profiles, and basic interference (C/I) modeling for multi-site scenarios. These build on existing infrastructure — propagation models, terrain profiles, and multi-site coverage.
## Priority Order
1. Link Budget Calculator (simplest, standalone UI)
2. Fresnel Zone Visualization (extends terrain profile)
3. Interference Modeling (extends coverage engine)
---
## Feature 1: Link Budget Calculator
### Description
A panel/dialog that shows the complete RF link budget as a table — from transmitter to receiver. Uses existing propagation model values but presents them in the standard telecom link budget format.
### Implementation
**New component:** `frontend/src/components/panels/LinkBudgetPanel.tsx`
The panel should display a table with rows for each element in the link chain. It should use the currently selected site's parameters and a configurable receiver point (either clicked on map or manually entered coordinates).
**Link Budget Table Structure:**
```
TRANSMITTER
Tx Power (dBm) [from site config, e.g. 43 dBm]
Tx Antenna Gain (dBi) [from site config, e.g. 18 dBi]
Tx Cable/Connector Loss (dB) [new field, default 2 dB]
EIRP (dBm) = Tx Power + Gain - Cable Loss
PATH
Distance (km) [calculated from Tx to Rx point]
Free Space Path Loss (dB) [existing formula: 20log(d) + 20log(f) + 32.45]
Terrain Diffraction Loss (dB) [from terrain_los model if available]
Vegetation Loss (dB) [from vegetation model if available]
Atmospheric Loss (dB) [from atmospheric model if available]
Total Path Loss (dB) = sum of all path losses
RECEIVER
Rx Antenna Gain (dBi) [configurable, default 0 dBi for handset]
Rx Cable Loss (dB) [configurable, default 0 dB]
Rx Sensitivity (dBm) [configurable, default -100 dBm]
RESULT
Received Power (dBm) = EIRP - Total Path Loss + Rx Gain - Rx Cable
Link Margin (dB) = Received Power - Rx Sensitivity
Status = "OK" if margin > 0, "FAIL" if < 0
```
**Backend addition:** Add a new endpoint or extend existing coverage API.
**File:** `backend/app/api/routes/coverage.py` (or new `link_budget.py`)
```python
@router.post("/api/link-budget")
async def calculate_link_budget(request: dict):
"""Calculate point-to-point link budget.
Body: {
"site_id": "...", # or tx_lat/tx_lon/tx_params
"tx_lat": 48.46,
"tx_lon": 35.04,
"tx_power_dbm": 43,
"tx_gain_dbi": 18,
"tx_cable_loss_db": 2,
"tx_height_m": 30,
"rx_lat": 48.50,
"rx_lon": 35.10,
"rx_gain_dbi": 0,
"rx_cable_loss_db": 0,
"rx_sensitivity_dbm": -100,
"rx_height_m": 1.5,
"frequency_mhz": 1800
}
"""
from app.services.terrain_service import terrain_service
# Calculate distance
distance_m = terrain_service.haversine_distance(
request["tx_lat"], request["tx_lon"],
request["rx_lat"], request["rx_lon"]
)
distance_km = distance_m / 1000
# Get elevations
tx_elev = await terrain_service.get_elevation(request["tx_lat"], request["tx_lon"])
rx_elev = await terrain_service.get_elevation(request["rx_lat"], request["rx_lon"])
# EIRP
eirp_dbm = request["tx_power_dbm"] + request["tx_gain_dbi"] - request["tx_cable_loss_db"]
# Free space path loss
freq = request["frequency_mhz"]
fspl_db = 20 * math.log10(distance_km) + 20 * math.log10(freq) + 32.45 if distance_km > 0 else 0
# Terrain profile for LOS check
profile = await terrain_service.get_elevation_profile(
request["tx_lat"], request["tx_lon"],
request["rx_lat"], request["rx_lon"],
num_points=100
)
# Simple LOS check - does terrain block line of sight?
tx_total_height = tx_elev + request.get("tx_height_m", 30)
rx_total_height = rx_elev + request.get("rx_height_m", 1.5)
terrain_loss_db = 0
los_clear = True
for i, point in enumerate(profile):
if i == 0 or i == len(profile) - 1:
continue
# Linear interpolation of LOS line at this point
fraction = i / (len(profile) - 1)
los_height = tx_total_height + fraction * (rx_total_height - tx_total_height)
if point["elevation"] > los_height:
los_clear = False
# Simple knife-edge diffraction estimate
terrain_loss_db += 6 # ~6dB per obstruction (simplified)
total_path_loss = fspl_db + terrain_loss_db
# Received power
rx_power_dbm = eirp_dbm - total_path_loss + request["rx_gain_dbi"] - request["rx_cable_loss_db"]
# Link margin
margin_db = rx_power_dbm - request["rx_sensitivity_dbm"]
return {
"distance_km": round(distance_km, 2),
"distance_m": round(distance_m, 1),
"tx_elevation_m": round(tx_elev, 1),
"rx_elevation_m": round(rx_elev, 1),
"eirp_dbm": round(eirp_dbm, 1),
"fspl_db": round(fspl_db, 1),
"terrain_loss_db": round(terrain_loss_db, 1),
"total_path_loss_db": round(total_path_loss, 1),
"los_clear": los_clear,
"rx_power_dbm": round(rx_power_dbm, 1),
"margin_db": round(margin_db, 1),
"status": "OK" if margin_db >= 0 else "FAIL",
"profile": profile,
}
```
### UI Requirements
- New panel accessible from sidebar or toolbar button (calculator icon)
- Click on map to set Rx point (with crosshair cursor)
- Auto-populates Tx params from selected site
- Shows result table with color coding (green margin = OK, red = FAIL)
- Optionally draws line on map from Tx to Rx
---
## Feature 2: Fresnel Zone Visualization
### Description
Draw Fresnel zone ellipse overlay on the Terrain Profile chart, showing where terrain intrudes into the first Fresnel zone. This is critical for understanding if a radio link will actually work — even if terrain doesn't block direct LOS, Fresnel zone obstruction causes significant signal loss.
### Implementation
**Modify:** The existing Terrain Profile component/chart
**Fresnel Zone Radius Formula:**
```python
import math
def fresnel_radius(n: int, frequency_mhz: float, d1_m: float, d2_m: float) -> float:
"""Calculate nth Fresnel zone radius at a point along the path.
Args:
n: Fresnel zone number (1 = first zone, most important)
frequency_mhz: Frequency in MHz
d1_m: Distance from transmitter to this point (meters)
d2_m: Distance from this point to receiver (meters)
Returns:
Radius of nth Fresnel zone in meters
"""
wavelength = 300.0 / frequency_mhz # meters
d_total = d1_m + d2_m
if d_total == 0:
return 0
radius = math.sqrt((n * wavelength * d1_m * d2_m) / d_total)
return radius
```
**Backend endpoint:** `backend/app/api/routes/coverage.py`
```python
@router.post("/api/fresnel-profile")
async def fresnel_profile(request: dict):
"""Calculate terrain profile with Fresnel zone boundaries.
Body: {
"tx_lat": 48.46, "tx_lon": 35.04, "tx_height_m": 30,
"rx_lat": 48.50, "rx_lon": 35.10, "rx_height_m": 1.5,
"frequency_mhz": 1800,
"num_points": 100
}
"""
from app.services.terrain_service import terrain_service
tx_lat, tx_lon = request["tx_lat"], request["tx_lon"]
rx_lat, rx_lon = request["rx_lat"], request["rx_lon"]
tx_height = request.get("tx_height_m", 30)
rx_height = request.get("rx_height_m", 1.5)
freq = request.get("frequency_mhz", 1800)
num_points = request.get("num_points", 100)
# Get terrain profile
profile = await terrain_service.get_elevation_profile(
tx_lat, tx_lon, rx_lat, rx_lon, num_points
)
total_distance = profile[-1]["distance"] if profile else 0
# Get endpoint elevations
tx_elev = profile[0]["elevation"] if profile else 0
rx_elev = profile[-1]["elevation"] if profile else 0
tx_total = tx_elev + tx_height
rx_total = rx_elev + rx_height
wavelength = 300.0 / freq # meters
# Calculate Fresnel zone at each profile point
fresnel_data = []
los_blocked = False
fresnel_blocked = False
worst_clearance = float('inf')
for i, point in enumerate(profile):
d1 = point["distance"] # distance from tx
d2 = total_distance - d1 # distance to rx
# LOS height at this point (linear interpolation)
if total_distance > 0:
fraction = d1 / total_distance
else:
fraction = 0
los_height = tx_total + fraction * (rx_total - tx_total)
# First Fresnel zone radius
if d1 > 0 and d2 > 0 and total_distance > 0:
f1_radius = math.sqrt((1 * wavelength * d1 * d2) / total_distance)
else:
f1_radius = 0
# Fresnel zone boundaries (height above sea level)
fresnel_top = los_height + f1_radius
fresnel_bottom = los_height - f1_radius
# Clearance: how much space between terrain and Fresnel bottom
clearance = fresnel_bottom - point["elevation"]
if clearance < worst_clearance:
worst_clearance = clearance
if point["elevation"] > los_height:
los_blocked = True
if point["elevation"] > fresnel_bottom:
fresnel_blocked = True
fresnel_data.append({
"distance": point["distance"],
"lat": point["lat"],
"lon": point["lon"],
"terrain_elevation": point["elevation"],
"los_height": round(los_height, 1),
"fresnel_top": round(fresnel_top, 1),
"fresnel_bottom": round(fresnel_bottom, 1),
"f1_radius": round(f1_radius, 1),
"clearance": round(clearance, 1),
})
return {
"profile": fresnel_data,
"total_distance_m": round(total_distance, 1),
"tx_elevation": round(tx_elev, 1),
"rx_elevation": round(rx_elev, 1),
"frequency_mhz": freq,
"wavelength_m": round(wavelength, 4),
"los_clear": not los_blocked,
"fresnel_clear": not fresnel_blocked,
"worst_clearance_m": round(worst_clearance, 1),
"recommendation": (
"Clear — excellent link" if not fresnel_blocked
else "Fresnel zone partially blocked — expect 3-6 dB additional loss"
if not los_blocked
else "LOS blocked — significant diffraction loss expected"
),
}
```
### Frontend Visualization
On the existing Terrain Profile chart:
- Draw the LOS line (straight line from Tx to Rx) — this may already exist
- Draw first Fresnel zone as a **semi-transparent elliptical area** around the LOS line
- Upper boundary = `fresnel_top` series
- Lower boundary = `fresnel_bottom` series
- Color: light blue with ~20% opacity
- Where terrain intersects Fresnel zone, highlight in red/orange
- Show clearance info in the profile tooltip
- Add a summary badge: "LOS Clear ✓" / "Fresnel 60% Clear ⚠" / "LOS Blocked ✗"
---
## Feature 3: Interference Modeling (C/I Ratio)
### Description
Add carrier-to-interference ratio calculation to the coverage engine. For each grid point, calculate the C/I ratio: the signal from the serving cell vs the sum of signals from all other cells on the same frequency. Display as a separate heatmap layer.
### Implementation
**Backend changes:**
**File:** `backend/app/services/coverage_service.py` (or gpu_service.py)
Add C/I calculation after existing coverage computation:
```python
def calculate_interference(self, sites: list, coverage_results: dict) -> np.ndarray:
"""Calculate C/I ratio for each grid point.
For each point:
- C = signal strength from strongest (serving) cell
- I = sum of signal strengths from all other co-frequency cells
- C/I = C - 10*log10(sum of linear interference powers)
Returns array of C/I values in dB.
"""
# Get all RSRP grids (already calculated)
# For each point, find:
# 1. Best server (strongest signal) = C
# 2. Sum of all others on same frequency = I
# 3. C/I = C(dBm) - I(dBm)
# Group sites by frequency
freq_groups = {}
for site in sites:
freq = site.get("frequency_mhz", 1800)
if freq not in freq_groups:
freq_groups[freq] = []
freq_groups[freq].append(site)
# Only calculate interference for frequency groups with 2+ sites
# For single-site frequencies, C/I = infinity (no interference)
# The RSRP values are already in dBm, need to convert to linear for summing
# P_linear = 10^(P_dBm / 10)
# I_total_linear = sum(P_linear for all interferers)
# I_total_dBm = 10 * log10(I_total_linear)
# C/I = C_dBm - I_total_dBm
pass
```
**Key algorithm (for GPU pipeline in gpu_service.py):**
```python
# After computing RSRP for all sites at all grid points:
# rsrp_grid shape: (num_sites, num_points) in dBm
# Convert to linear (mW)
rsrp_linear = 10 ** (rsrp_grid / 10.0) # CuPy array
# For each point, best server
best_server_idx = cp.argmax(rsrp_grid, axis=0)
best_rsrp_linear = cp.take_along_axis(rsrp_linear, best_server_idx[cp.newaxis, :], axis=0)[0]
# Total power from all sites
total_power = cp.sum(rsrp_linear, axis=0)
# Interference = total - serving
interference_linear = total_power - best_rsrp_linear
# C/I ratio in dB
# Avoid log10(0) with small epsilon
epsilon = 1e-30
ci_ratio_db = 10 * cp.log10(best_rsrp_linear / (interference_linear + epsilon))
# Clip to reasonable range
ci_ratio_db = cp.clip(ci_ratio_db, -20, 50)
```
### Frontend Visualization
- Add a toggle in the coverage controls: "Show: Signal (RSRP) | Interference (C/I)"
- C/I heatmap uses different color scale:
- Dark red: < 0 dB (interference dominant — no service)
- Orange: 0-10 dB (marginal)
- Yellow: 10-20 dB (acceptable)
- Green: 20-30 dB (good)
- Blue: > 30 dB (excellent, minimal interference)
- The C/I map only makes sense with 2+ sites on same frequency
- Show warning if all sites are on different frequencies (no co-channel interference)
### API Response Extension
Add `ci_ratio` field to coverage calculation response alongside existing `rsrp` values.
---
## Testing Checklist
### Link Budget
- [ ] Panel opens from toolbar/sidebar
- [ ] Click on map sets Rx point
- [ ] Tx parameters auto-populate from selected site
- [ ] Link budget table shows all rows correctly
- [ ] Margin calculation is correct (manual verification)
- [ ] Color coding: green for positive margin, red for negative
- [ ] Line drawn on map from Tx to Rx
### Fresnel Zone
- [ ] Terrain profile shows Fresnel zone overlay
- [ ] Fresnel ellipse is widest at midpoint (correct shape)
- [ ] Red highlighting where terrain enters Fresnel zone
- [ ] Summary shows LOS/Fresnel status
- [ ] Works at different frequencies (zone size changes with frequency)
- [ ] Clearance values are reasonable (first Fresnel zone at 1800 MHz, 10km = ~20m radius at midpoint)
### Interference
- [ ] C/I toggle appears when 2+ sites exist
- [ ] C/I heatmap renders with correct color scale
- [ ] Single-site scenario shows "no interference" or infinite C/I
- [ ] Two sites on same frequency show interference zones between them
- [ ] C/I values are reasonable (> 20 dB near serving cell, < 10 dB at cell edge)
## Build & Deploy
```bash
cd D:\root\rfcp
# Backend — just restart uvicorn (Python, no build)
cd backend
python -m uvicorn app.main:app --host 0.0.0.0 --port 8090 --reload
# Frontend — rebuild if UI components changed
cd frontend
npm run build
# Full installer rebuild if needed
# (use existing build script)
```
## Commit Message
```
feat(rf): add link budget, Fresnel zone, and interference modeling
- Add /api/link-budget endpoint with full path analysis
- Add /api/fresnel-profile endpoint with zone clearance calculation
- Add C/I ratio computation to GPU coverage pipeline
- Add LinkBudgetPanel frontend component
- Add Fresnel zone overlay to terrain profile chart
- Add C/I heatmap toggle alongside RSRP display
- Group interference by frequency for co-channel analysis
```
## Success Criteria
1. Link budget shows correct margin for known test case (Dnipro, 10km, 1800MHz)
2. Fresnel zone visually shows ellipse on terrain profile
3. Two co-frequency sites show interference pattern between them
4. All three features work with existing terrain data (no new downloads needed)
5. GPU pipeline performance not significantly degraded by C/I calculation

View File

@@ -0,0 +1,210 @@
# RFCP — Iteration 3.10.1: UI/UX Bugfixes
## Overview
Four bugs found during 3.10 testing. All are frontend issues, no backend changes needed.
---
## Bug 1: Ruler places point when clicking Terrain Profile button
**Problem:** When Ruler mode is active and user clicks "Terrain Profile" button in the measurement overlay, it also places a ruler point on the map underneath. The click event propagates to the map.
**Fix:** Stop event propagation on the Terrain Profile button click handler. The Terrain Profile button (and any overlay UI elements) should call `e.stopPropagation()` to prevent the click from reaching the map layer.
Also review: any other UI overlays that sit on top of the map (Link Budget panel, coverage controls, etc.) should also stop propagation to prevent accidental ruler/site placement.
**Files to check:**
- MeasurementTool component (Terrain Profile button handler)
- Any overlay/popup components that sit on top of the Leaflet map
---
## Bug 2: Cursor should be default arrow, not hand; Ruler snap to site
**Problem A:** The map cursor shows as a grab/hand icon. Should be default arrow cursor for normal mode. Hand cursor should only appear when dragging the map.
**Fix A:** Set Leaflet map cursor styles:
```css
/* Default cursor */
.leaflet-container {
cursor: default !important;
}
/* Grabbing only when dragging */
.leaflet-container.leaflet-drag-target {
cursor: grabbing !important;
}
/* Crosshair for ruler mode */
.leaflet-container.ruler-mode {
cursor: crosshair !important;
}
/* Crosshair for RX point placement mode */
.leaflet-container.rx-placement-mode {
cursor: crosshair !important;
}
```
Apply CSS classes to the map container based on current mode. Remove Leaflet's default grab cursor.
**Problem B:** When using the ruler, it should be possible to snap the ruler start/end point exactly to a site (tower) location. Currently you have to eyeball it.
**Fix B:** When in ruler mode and clicking near a site marker (within ~20px), snap the ruler point to the exact site coordinates. This gives precise distance measurements from tower to any point.
```typescript
// In ruler click handler:
const SNAP_DISTANCE_PX = 20;
function findNearestSite(clickLatLng: L.LatLng, map: L.Map): Site | null {
const clickPoint = map.latLngToContainerPoint(clickLatLng);
let nearest: Site | null = null;
let minDist = Infinity;
for (const site of sites) {
const sitePoint = map.latLngToContainerPoint(L.latLng(site.lat, site.lon));
const dist = clickPoint.distanceTo(sitePoint);
if (dist < SNAP_DISTANCE_PX && dist < minDist) {
minDist = dist;
nearest = site;
}
}
return nearest;
}
// When placing ruler point:
const snappedSite = findNearestSite(clickLatLng, map);
if (snappedSite) {
// Use exact site coordinates
rulerPoint = L.latLng(snappedSite.lat, snappedSite.lon);
} else {
rulerPoint = clickLatLng;
}
```
---
## Bug 3: Link Budget Calculator text invisible + RX point not placed on map
**Problem A:** Text in Link Budget Calculator panel is black on dark background — invisible. The input fields and labels need light text color for dark theme.
**Fix A:** Ensure all text in LinkBudgetPanel uses light colors:
```css
/* All text in the panel should be light */
color: #e2e8f0; /* or whatever the app's light text color is */
/* Input fields */
input {
color: #e2e8f0;
background: #1e293b; /* dark input background */
border: 1px solid #475569;
}
/* Labels */
label {
color: #94a3b8; /* slightly muted for labels */
}
/* Values/results */
.result-value {
color: #f1f5f9; /* bright white for important values */
}
```
Check if the panel is using Tailwind classes — if so, ensure `text-slate-200` or similar is applied to the container. The panel likely inherits wrong text color or has hardcoded dark text.
**Problem B:** When clicking "Click on Map to Set RX Point" and then clicking on the map, the RX marker does not appear on the map. The coordinates might update in the fields but there's no visual indicator.
**Fix B:** When RX point is set:
1. Place a visible marker on the map at the RX location (use a different icon than the TX site — e.g., a small circle or pin in a different color like orange or blue)
2. Draw a dashed line from the TX site to the RX marker
3. The marker should be draggable to adjust position
4. When Link Budget panel is closed, remove the RX marker and line
```typescript
// RX marker icon (different from site markers)
const rxIcon = L.divIcon({
className: 'rx-marker',
html: '<div style="width: 12px; height: 12px; background: #f97316; border: 2px solid white; border-radius: 50%;"></div>',
iconSize: [12, 12],
iconAnchor: [6, 6],
});
// Place marker
const rxMarker = L.marker([rxLat, rxLon], { icon: rxIcon, draggable: true }).addTo(map);
// Dashed line from TX to RX
const linkLine = L.polyline([[txLat, txLon], [rxLat, rxLon]], {
color: '#f97316',
weight: 2,
dashArray: '8, 4',
opacity: 0.8,
}).addTo(map);
// Update on drag
rxMarker.on('drag', (e) => {
const pos = e.target.getLatLng();
linkLine.setLatLngs([[txLat, txLon], [pos.lat, pos.lng]]);
// Update Link Budget panel coordinates
updateRxCoordinates(pos.lat, pos.lng);
});
```
---
## Bug 4: Elevation color opacity not working
**Problem:** The opacity control for elevation/terrain colors on the map is not functioning. Adjusting the opacity slider has no effect on the terrain overlay visibility.
**Fix:** Check how the elevation overlay is rendered:
1. If it's a tile layer (Leaflet tile overlay), use `layer.setOpacity(value)`
2. If it's the topo map layer, the opacity needs to be applied to the correct layer reference
3. If it's the coverage heatmap opacity that's broken, check the canvas renderer opacity
The "Elev" button on the right toolbar likely toggles an elevation visualization. Find where this layer is created and ensure:
```typescript
// When opacity slider changes:
elevationLayer.setOpacity(opacityValue);
// Or if it's a canvas overlay:
const canvas = document.querySelector('.elevation-overlay');
if (canvas) {
canvas.style.opacity = String(opacityValue);
}
```
Also check: there might be TWO opacity controls that are confused:
- Coverage heatmap opacity (the RSRP colors)
- Terrain/elevation color overlay opacity (the topo colors)
Make sure each slider controls the correct layer.
---
## Testing Checklist
- [ ] Click Terrain Profile button with Ruler active — NO extra ruler point placed
- [ ] Default cursor is arrow, not hand
- [ ] Cursor changes to crosshair in Ruler mode
- [ ] Cursor changes to crosshair in RX placement mode
- [ ] Ruler snaps to site when clicking near tower marker
- [ ] Link Budget panel text is readable (light on dark)
- [ ] Clicking map in RX mode places visible orange marker
- [ ] Dashed line drawn from TX to RX
- [ ] RX marker removed when panel closes
- [ ] Elevation opacity slider actually changes overlay transparency
## Commit Message
```
fix(ui): resolve ruler propagation, cursor, link budget visibility, elevation opacity
- Stop click propagation on Terrain Profile button (prevents ruler point)
- Change default cursor to arrow, crosshair for tool modes
- Add ruler snap-to-site (20px threshold)
- Fix Link Budget panel text colors for dark theme
- Add RX marker and dashed line on map
- Fix elevation overlay opacity control binding
```

View File

@@ -0,0 +1,349 @@
# RFCP — Iteration 3.10.2: Tool Mode System & Click Fixes
## Root Cause
All click-related bugs share one root cause: multiple features compete for the same map click event. Ruler, RX point placement, site placement, and terrain profile all listen to map clicks simultaneously. There's no centralized "active tool" state that prevents conflicts.
## Solution: Active Tool Mode
Create a single source of truth for which tool is currently active. Only the active tool receives map click events.
### Tool Modes (mutually exclusive):
```typescript
type ActiveTool =
| 'none' // Default — pan/zoom only, no click actions
| 'ruler' // Distance measurement, click to add points
| 'rx-placement' // Link Budget RX point, single click
| 'site-placement' // Place new site on map
```
### Implementation
**1. Add to app store (Zustand):**
```typescript
// In the main store or a new toolStore:
interface ToolState {
activeTool: ActiveTool;
setActiveTool: (tool: ActiveTool) => void;
clearTool: () => void;
}
const useToolStore = create<ToolState>((set) => ({
activeTool: 'none',
setActiveTool: (tool) => set({ activeTool: tool }),
clearTool: () => set({ activeTool: 'none' }),
}));
```
**2. Map click handler — single entry point:**
Replace all individual map click listeners with ONE handler:
```typescript
// In the main Map component:
map.on('click', (e: L.LeafletMouseEvent) => {
const { activeTool } = useToolStore.getState();
switch (activeTool) {
case 'ruler':
handleRulerClick(e);
break;
case 'rx-placement':
handleRxPlacement(e);
break;
case 'site-placement':
handleSitePlacement(e);
break;
case 'none':
default:
// No action on map click — just pan/zoom
break;
}
});
```
**3. Cursor changes based on active tool:**
```typescript
useEffect(() => {
const container = map.getContainer();
// Remove all tool cursors
container.classList.remove('ruler-mode', 'rx-placement-mode', 'site-placement-mode');
switch (activeTool) {
case 'ruler':
container.classList.add('ruler-mode');
break;
case 'rx-placement':
container.classList.add('rx-placement-mode');
break;
case 'site-placement':
container.classList.add('site-placement-mode');
break;
default:
// Default cursor (arrow)
break;
}
}, [activeTool]);
```
**4. CSS for cursors:**
```css
.leaflet-container {
cursor: default !important;
}
.leaflet-container.leaflet-dragging {
cursor: grabbing !important;
}
.leaflet-container.ruler-mode {
cursor: crosshair !important;
}
.leaflet-container.rx-placement-mode {
cursor: crosshair !important;
}
.leaflet-container.site-placement-mode {
cursor: cell !important;
}
```
**5. UI buttons toggle tool mode:**
```typescript
// Ruler button:
const handleRulerToggle = () => {
if (activeTool === 'ruler') {
clearTool(); // Toggle off
} else {
setActiveTool('ruler'); // Activate ruler, deactivate others
}
};
// Link Budget "Click on Map to Set RX Point" button:
const handleRxModeToggle = () => {
if (activeTool === 'rx-placement') {
clearTool();
} else {
setActiveTool('rx-placement');
}
};
```
**6. Auto-deactivation:**
- RX placement: deactivate after single click (point is set)
- Ruler: stays active until toggled off or right-click finishes
- Site placement: deactivate after placing site
---
## Fix: Ruler Snap to Site
In the ruler click handler, check proximity to existing sites:
```typescript
function handleRulerClick(e: L.LeafletMouseEvent) {
const map = e.target;
const clickPoint = map.latLngToContainerPoint(e.latlng);
const SNAP_THRESHOLD_PX = 20;
// Check all site markers
let snappedLatLng = e.latlng;
let snapped = false;
for (const site of sites) {
const siteLatLng = L.latLng(site.lat, site.lon);
const sitePoint = map.latLngToContainerPoint(siteLatLng);
const pixelDist = clickPoint.distanceTo(sitePoint);
if (pixelDist < SNAP_THRESHOLD_PX) {
snappedLatLng = siteLatLng;
snapped = true;
break;
}
}
// Add ruler point at snapped or original location
addRulerPoint(snappedLatLng);
// Optional: visual feedback for snap
if (snapped) {
// Brief highlight on the site marker
}
}
```
---
## Fix: RX Point Placement + Visual Marker
When in 'rx-placement' mode and map is clicked:
```typescript
function handleRxPlacement(e: L.LeafletMouseEvent) {
const { lat, lng } = e.latlng;
// Update Link Budget panel coordinates
setRxCoordinates(lat, lng);
// Place visible marker on map
if (rxMarkerRef.current) {
rxMarkerRef.current.setLatLng([lat, lng]);
} else {
rxMarkerRef.current = L.marker([lat, lng], {
icon: L.divIcon({
className: 'rx-point-marker',
html: `<div style="
width: 14px; height: 14px;
background: #f97316;
border: 2px solid #fff;
border-radius: 50%;
box-shadow: 0 0 6px rgba(249,115,22,0.6);
"></div>`,
iconSize: [14, 14],
iconAnchor: [7, 7],
}),
draggable: true,
}).addTo(map);
// Update coords on drag
rxMarkerRef.current.on('drag', (ev) => {
const pos = ev.target.getLatLng();
setRxCoordinates(pos.lat, pos.lng);
});
}
// Draw dashed line from TX to RX
const selectedSite = getSelectedSite();
if (selectedSite && linkLineRef.current) {
linkLineRef.current.setLatLngs([[selectedSite.lat, selectedSite.lon], [lat, lng]]);
} else if (selectedSite) {
linkLineRef.current = L.polyline(
[[selectedSite.lat, selectedSite.lon], [lat, lng]],
{ color: '#f97316', weight: 2, dashArray: '8,4', opacity: 0.8 }
).addTo(map);
}
// Deactivate RX placement mode (single click action)
clearTool();
}
// Cleanup when Link Budget panel closes:
function cleanupRxMarker() {
if (rxMarkerRef.current) {
rxMarkerRef.current.remove();
rxMarkerRef.current = null;
}
if (linkLineRef.current) {
linkLineRef.current.remove();
linkLineRef.current = null;
}
}
```
---
## Fix: Terrain Profile Click-Through
The Terrain Profile popup and its "Terrain Profile" trigger button must stop event propagation:
```typescript
// On the Terrain Profile button in the measurement overlay:
<button
onClick={(e) => {
e.stopPropagation();
e.preventDefault();
showTerrainProfile();
}}
onMouseDown={(e) => e.stopPropagation()}
onPointerDown={(e) => e.stopPropagation()}
>
Terrain Profile
</button>
// On the Terrain Profile popup container:
<div
className="terrain-profile-popup"
onClick={(e) => e.stopPropagation()}
onMouseDown={(e) => e.stopPropagation()}
onPointerDown={(e) => e.stopPropagation()}
>
{/* ... chart content ... */}
</div>
```
Also ensure the popup/panel has `pointer-events: auto` and is positioned with a high z-index above the map.
With the tool mode system in place, this becomes less critical since clicking terrain profile UI won't trigger ruler (ruler mode would be separate), but stopping propagation is still good practice.
---
## Fix: Default Cursor (Not Hand)
Override Leaflet's default grab cursor:
```css
/* Global override in the app's main CSS */
.leaflet-container {
cursor: default !important;
}
/* Only show grab when actually dragging */
.leaflet-container.leaflet-dragging,
.leaflet-container:active {
cursor: grabbing !important;
}
/* Remove grab cursor from interactive layers too */
.leaflet-interactive {
cursor: default !important;
}
/* Tool-specific cursors applied via JS class toggle
   (class names must match the useEffect in step 3 above) */
.leaflet-container.ruler-mode {
cursor: crosshair !important;
}
.leaflet-container.rx-placement-mode {
cursor: crosshair !important;
}
.leaflet-container.site-placement-mode {
cursor: cell !important;
}
```
---
## Testing Checklist
- [ ] Only ONE tool can be active at a time
- [ ] Activating Ruler deactivates RX placement and vice versa
- [ ] Default cursor is arrow (not hand/grab)
- [ ] Cursor changes to crosshair when Ruler is active
- [ ] Cursor changes to crosshair when RX placement is active
- [ ] Cursor shows grabbing only when dragging map
- [ ] Clicking Terrain Profile button does NOT place ruler point
- [ ] Clicking any UI panel/popup does NOT place ruler point
- [ ] Ruler point snaps to site marker when clicking within 20px
- [ ] RX point click places orange marker on map
- [ ] Dashed orange line appears from TX site to RX marker
- [ ] RX marker is draggable (updates coordinates in panel)
- [ ] RX marker removed when Link Budget panel closes
- [ ] Right-click finishes ruler measurement
## Commit Message
```
fix(tools): implement active tool mode system, fix click conflicts
- Add ActiveTool state (none/ruler/rx-placement/site-placement)
- Single map click handler dispatches to active tool only
- Fix cursor: default arrow, crosshair for tools, grabbing for drag
- Add ruler snap-to-site (20px threshold)
- Add RX marker with draggable orange dot and dashed line
- Stop event propagation on all UI overlays above map
- Clean up markers when panels close
```

View File

@@ -0,0 +1,106 @@
# RFCP — Iteration 3.10.3: Calculator Shortcut & Ruler Limit
## Two small UX changes, no backend.
---
## 1. Link Budget Calculator — Quick Access Button
Move calculator access to a visible toolbar button, not buried in Map Tools panel.
**Location:** Top-left corner of the map, below the zoom controls (+/- buttons). Similar to how Fit, Reset, Topo, Grid, Ruler, Elev buttons are in the top-right.
**Implementation:**
Add a button to the left toolbar (or create a small floating button group):
```typescript
// Top-left button, below zoom controls
<button
className="map-tool-btn"
onClick={() => setShowLinkBudget(!showLinkBudget)}
title="Link Budget Calculator"
>
{/* Calculator icon — use an emoji or SVG */}
🔗 {/* or a small "LB" text label, or a calculator SVG icon */}
</button>
```
**Styling:** Same visual style as the right-side tool buttons (Fit, Reset, Topo, Grid, Ruler, Elev) — dark rounded rectangle with light text/icon.
**Position options (pick one):**
- **Option A:** Add to the RIGHT toolbar stack below "Elev" button — keeps all tools together
- **Option B:** Floating button top-left below zoom — separate but prominent
- **Option C:** Add to the measurement overlay bar (near the ruler distance display)
Recommend **Option A** — add "LB" or calculator icon button to the right toolbar stack, below Elev. Consistent with existing UI pattern.
Also: Remove the "Hide Link Budget Calculator" button from Map Tools panel (or keep it as secondary toggle — but primary access should be the toolbar button).
---
## 2. Ruler — Maximum 2 Points Only
**Problem:** Ruler currently allows unlimited points, creating a web of measurement lines. For RF point-to-point measurement, only 2 points make sense: start and end.
**Fix:** Limit ruler to exactly 2 points. When both points are placed, the measurement is complete. To start a new measurement, clicking again replaces the first point and clears the old measurement.
```typescript
// In the map click handler for ruler mode:
function handleRulerClick(e: L.LeafletMouseEvent) {
const currentPoints = rulerPoints;
if (currentPoints.length === 0) {
// First point
setRulerPoints([snappedLatLng]);
} else if (currentPoints.length === 1) {
// Second point — measurement complete
setRulerPoints([currentPoints[0], snappedLatLng]);
// Optionally: auto-deactivate ruler mode after 2nd point
// clearTool(); // uncomment if you want one-shot behavior
} else {
// Already 2 points — start new measurement
// Replace: clear old points, start fresh with new first point
setRulerPoints([snappedLatLng]);
}
}
```
**Behavior:**
1. Click 1: Place start point (show marker)
2. Click 2: Place end point (show marker + line + distance label + Terrain Profile button)
3. Click 3: Clear previous, start new measurement from this click
4. Right-click or Escape: Cancel/clear ruler entirely
**Remove:**
- Remove "Right-click to finish" instruction (no longer needed — measurement auto-completes at 2 points)
- Remove multi-point polyline rendering (only single line between 2 points)
**Visual:**
- Show a single straight line between 2 points (green dashed, as current)
- Distance label at midpoint
- Terrain Profile button appears after 2nd point is placed
- Small circle markers at both endpoints
---
## Testing Checklist
- [ ] Calculator button visible in toolbar (right side, below Elev)
- [ ] Click calculator button opens/closes Link Budget panel
- [ ] Ruler allows exactly 2 points, no more
- [ ] Third click starts new measurement (replaces old)
- [ ] Escape clears ruler
- [ ] Distance + Terrain Profile button appears after 2nd point
- [ ] No multi-point web/polygon possible
- [ ] Ruler still snaps to site markers
## Commit Message
```
fix(ux): add calculator toolbar button, limit ruler to 2 points
- Add Link Budget Calculator button to right toolbar
- Limit ruler to exactly 2 points (point-to-point only)
- Third click starts new measurement, clears previous
- Remove multi-point polyline behavior
```

View File

@@ -0,0 +1,136 @@
# RFCP — Iteration 3.10.4: Terrain Profile Click Fix & TX Height
## Two bugs remaining from previous iterations.
---
## Bug 1: Terrain Profile click still places ruler point
**Problem:** Clicking inside the Terrain Profile popup (chart area, close button, fresnel checkbox, anywhere in the popup) triggers the map click handler underneath, which places a ruler point or resets the measurement.
**Previous fix was incomplete** — stopPropagation was added to some elements but not the entire popup container and its backdrop.
**Fix:** The Terrain Profile popup needs a FULL click barrier. Every mouse event must be caught:
```typescript
// The OUTERMOST container of the Terrain Profile popup:
<div
className="terrain-profile-container"
onClick={(e) => { e.stopPropagation(); e.nativeEvent.stopImmediatePropagation(); }}
onMouseDown={(e) => { e.stopPropagation(); e.nativeEvent.stopImmediatePropagation(); }}
onMouseUp={(e) => { e.stopPropagation(); e.nativeEvent.stopImmediatePropagation(); }}
onPointerDown={(e) => { e.stopPropagation(); e.nativeEvent.stopImmediatePropagation(); }}
onPointerUp={(e) => { e.stopPropagation(); e.nativeEvent.stopImmediatePropagation(); }}
onDoubleClick={(e) => { e.stopPropagation(); e.nativeEvent.stopImmediatePropagation(); }}
>
{/* All terrain profile content */}
</div>
```
**IMPORTANT:** `stopPropagation()` alone may not be enough because Leaflet listens to DOM events directly, not React synthetic events. The fix MUST also call `e.nativeEvent.stopImmediatePropagation()` to prevent Leaflet's native DOM listener from firing.
**Alternative approach (more robust):** Add the popup OUTSIDE the Leaflet map container in the DOM tree. If the Terrain Profile div is a sibling or parent of the map div (not a child), Leaflet's event delegation won't catch clicks on it at all.
```tsx
// In the main layout:
<div className="app-layout">
<div id="map-container">
{/* Leaflet map renders here */}
</div>
{/* These are OUTSIDE the map container — Leaflet can't intercept */}
{showTerrainProfile && (
<TerrainProfile ... />
)}
{showLinkBudget && (
<LinkBudgetPanel ... />
)}
</div>
```
If moving outside the map container is too much refactoring, the stopImmediatePropagation approach should work. But check: is the TerrainProfile component rendered INSIDE a Leaflet pane or overlay? If so, moving it out is the correct fix.
**Also apply the same fix to:**
- Link Budget Calculator panel
- Any other floating panel/popup that sits over the map
---
## Bug 2: TX Height always shows 2m in Link Budget Calculator
**Problem:** The Link Budget Calculator TRANSMITTER section always shows `Height: 2m` regardless of the actual site configuration. It should read the height from the selected site's settings.
**Root cause:** The LinkBudgetPanel component likely reads `site.height` but the site object might store height in a different field name (e.g., `site.antennaHeight`, `site.towerHeight`, `site.params.height`, or per-sector height).
**Fix:** Find where site height is stored and pass the correct value:
```typescript
// In LinkBudgetPanel.tsx, find where TX height is set:
// WRONG (probably current):
const txHeight = site.height || 2; // Defaults to 2 if field is missing
// Check the actual site data structure. It might be:
const txHeight = site.antennaHeight
|| site.tower_height
|| site.params?.height
|| site.sectors?.[0]?.height // If height is per-sector
|| 30; // Default should be 30m for a typical cell tower, not 2m
// Or if height is stored in meters in a nested config:
const txHeight = selectedSite?.config?.height || selectedSite?.height || 30;
```
**Steps to debug:**
1. In the browser console (F12), find the selected site object
2. Check what field contains the height value
3. Update LinkBudgetPanel to read from the correct field
**Display fix:**
```typescript
// In the TRANSMITTER section of the panel:
<div className="param-row">
<span>Height:</span>
<span>{txHeight} m</span>
</div>
```
The height should also be EDITABLE in the link budget calculator (as an input field, not just display), since you might want to test "what if I put the antenna at 40m instead of 30m?" without changing the actual site config.
```typescript
// Make height an editable field with site value as default:
const [txHeightOverride, setTxHeightOverride] = useState<number | null>(null);
const txHeight = txHeightOverride ?? (site?.height || 30);
<div className="param-row">
<label>Height:</label>
<input
type="number"
value={txHeight}
onChange={(e) => setTxHeightOverride(parseFloat(e.target.value))}
/> m
</div>
```
---
## Testing Checklist
- [ ] Click ANYWHERE inside Terrain Profile popup — NO ruler point placed
- [ ] Click Terrain Profile close button (X) — popup closes, no ruler point
- [ ] Click Fresnel Zone checkbox — toggles, no ruler point
- [ ] Click chart area — no ruler point
- [ ] Drag/scroll inside chart — no map pan/zoom
- [ ] TX Height in Link Budget shows actual site height (not 2m)
- [ ] TX Height is editable for what-if scenarios
- [ ] Changing TX height recalculates link budget
## Commit Message
```
fix(ui): block all click propagation from terrain profile, fix TX height
- Add stopImmediatePropagation on terrain profile container
- Prevent all mouse/pointer events from reaching Leaflet map
- Fix TX height reading from site config (was defaulting to 2m)
- Make TX height editable in link budget calculator
```

View File

@@ -0,0 +1,130 @@
# RFCP 3.6.0 — Production GPU Build (Claude Code Task)
## Goal
Build `rfcp-server.exe` (PyInstaller) with CuPy GPU support so production RFCP
detects the NVIDIA GPU without manual `pip install`.
Currently production exe shows "CPU (NumPy)" because CuPy is not bundled.
## Current Environment (CONFIRMED WORKING)
```
Windows 10 (10.0.26200)
Python 3.11.8 (C:\Python311)
NVIDIA GeForce RTX 4060 Laptop GPU (8 GB VRAM)
CUDA Toolkit 13.1 (C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v13.1)
CUDA_PATH = C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v13.1
Packages:
cupy-cuda13x 13.6.0 ← NOT cuda12x!
numpy 1.26.4
scipy 1.17.0
fastrlock 0.8.3
pyinstaller 6.18.0
GPU compute verified:
python -c "import cupy; a = cupy.array([1,2,3]); print(a.sum())" → 6 ✅
```
## What We Already Tried (And Why It Failed)
### Attempt 1: ONEFILE spec with collect_all('cupy')
- `collect_all('cupy')` returns 1882 datas, **0 binaries** — CuPy pip doesn't bundle DLLs on Windows
- CUDA DLLs come from two separate sources:
- **nvidia pip packages** (14 DLLs in `C:\Python311\Lib\site-packages\nvidia\*/bin/`)
- **CUDA Toolkit** (13 DLLs in `C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v13.1\bin\x64\`)
- We manually collected these 27 DLLs in the spec
- Build succeeded (3 GB exe!) but crashed on launch:
```
[PYI-10456:ERROR] Failed to extract cufft64_12.dll: decompression resulted in return code -1!
```
- Root cause: `cufft64_12.dll` is 297 MB — PyInstaller's zlib compression fails on it in ONEFILE mode
### Attempt 2: We were about to try ONEDIR but haven't built it yet
### Key Insight: Duplicate DLLs from two sources
nvidia pip packages have CUDA 12.x DLLs (cublas64_12.dll etc.)
CUDA Toolkit 13.1 has CUDA 13.x DLLs (cublas64_13.dll etc.)
CuPy-cuda13x needs the 13.x versions. The 12.x from pip may conflict.
## What Needs To Happen
1. **Build rfcp-server as ONEDIR** (folder with exe + DLLs, not single exe)
- This avoids the decompression crash with large CUDA DLLs
- Output: `backend/dist/rfcp-server/rfcp-server.exe` + all DLLs alongside
2. **Include ONLY the correct CUDA DLLs**
- Prefer CUDA Toolkit 13.1 DLLs (match cupy-cuda13x)
- The nvidia pip packages have cuda12x DLLs — may cause version conflicts
- Key DLLs needed: cublas, cusparse, cusolver, curand, cufft, nvrtc, cudart
3. **Exclude bloat** — the previous build pulled in tensorflow, grpc, opentelemetry etc.
making it 3 GB. Real size should be ~600-800 MB.
4. **Test the built exe** — run it standalone and verify:
- `curl http://localhost:8090/api/health` returns `"build": "gpu"`
- `curl http://localhost:8090/api/gpu/status` returns `"available": true`
- Or at minimum: the exe starts without errors and CuPy imports successfully
5. **Update Electron integration** if needed:
- Current Electron expects a single `rfcp-server.exe` file
- With ONEDIR, it's a folder `rfcp-server/rfcp-server.exe`
- File: `desktop/main.js` or `desktop/src/main.ts` — look for where it spawns backend
- The path needs to change from `resources/backend/rfcp-server.exe`
to `resources/backend/rfcp-server/rfcp-server.exe`
## File Locations
```
D:\root\rfcp\
├── backend\
│ ├── run_server.py ← PyInstaller entry point
│ ├── app\
│ │ ├── main.py ← FastAPI app
│ │ ├── services\
│ │ │ ├── gpu_backend.py ← GPU detection (CuPy/NumPy fallback)
│ │ │ └── coverage_service.py ← Uses get_array_module()
│ │ └── api\routes\gpu.py ← /api/gpu/status, /api/gpu/diagnostics
│ ├── dist\ ← PyInstaller output goes here
│ └── build\ ← PyInstaller build cache
├── installer\
│ ├── rfcp-server-gpu.spec ← GPU spec (needs fixing)
│ ├── rfcp-server.spec ← CPU spec (working, don't touch)
│ ├── rfcp.ico ← Icon (exists)
│ └── build-gpu.bat ← Build script
├── desktop\
│ ├── main.js or src/main.ts ← Electron main process
│ └── resources\backend\ ← Where production exe lives
└── frontend\ ← React frontend (no changes needed)
```
## Existing CPU spec for reference
The working CPU-only spec is at `installer/rfcp-server.spec`. Use it as the base
and ADD CuPy + CUDA on top. Don't reinvent the wheel.
## Build Command
```powershell
cd D:\root\rfcp\backend
pyinstaller ..\installer\rfcp-server-gpu.spec --clean --noconfirm
```
## Success Criteria
- [ ] `dist/rfcp-server/rfcp-server.exe` starts without errors
- [ ] CuPy imports successfully inside the exe (no missing DLL errors)
- [ ] `/api/gpu/status` returns `"available": true, "device": "RTX 4060"`
- [ ] Total folder size < 1 GB (ideally 600-800 MB)
- [ ] No tensorflow/grpc/opentelemetry bloat
- [ ] Electron can find and launch the backend (path updated if needed)
## Important Notes
- Do NOT use cupy-cuda12x — we migrated to cupy-cuda13x
- Do NOT try ONEFILE mode — cufft64_12.dll (297 MB) crashes decompression
- The nvidia pip packages (nvidia-cublas-cu12, etc.) are still installed but may
conflict with CUDA Toolkit 13.1 — prefer Toolkit DLLs
- `collect_all('cupy')` gives 0 binaries on Windows — DLLs must be manually specified
- gpu_backend.py already handles CuPy absence gracefully (falls back to NumPy)

View File

@@ -0,0 +1,133 @@
# RFCP 3.7.0 — GPU-Accelerated Coverage Calculations
## Context
Iteration 3.6.0 completed: CuPy-cuda13x works in production PyInstaller build,
RTX 4060 detected, ONEDIR build with CUDA DLLs. BUT coverage calculations still
run on CPU because coverage_service.py uses `import numpy as np` directly instead
of the GPU backend.
The GPU infrastructure is ready:
- `app/services/gpu_backend.py` has `GPUManager.get_array_module()` → returns cupy or numpy
- `/api/gpu/status` confirms `"active_backend": "cuda"`
- CuPy is imported and GPU detected in the frozen exe
## Goal
Replace direct `np.` calls in coverage_service.py with `xp = gpu_manager.get_array_module()`
so calculations run on GPU when available, with automatic NumPy fallback.
## Files to Modify
### `app/services/coverage_service.py`
**Line 7**: `import numpy as np` — keep this but also import gpu_manager
Add near top:
```python
from app.services.gpu_backend import gpu_manager
```
**Key sections to GPU-accelerate** (highest impact first):
#### 1. Grid array creation (lines 549-550, 922-923)
```python
# BEFORE:
grid_lats = np.array([lat for lat, lon in grid])
grid_lons = np.array([lon for lat, lon in grid])
# AFTER:
xp = gpu_manager.get_array_module()
grid_lats = xp.array([lat for lat, lon in grid])
grid_lons = xp.array([lon for lat, lon in grid])
```
#### 2. Trig calculations (line 468, 1031, 1408-1415, 1442)
These use np.cos, np.radians, np.sin, np.degrees, np.arctan2 — all have CuPy equivalents.
```python
# BEFORE:
lon_delta = settings.radius / (111000 * np.cos(np.radians(center_lat)))
cos_lat = np.cos(np.radians(center_lat))
# AFTER:
xp = gpu_manager.get_array_module()
lon_delta = settings.radius / (111000 * float(xp.cos(xp.radians(center_lat))))
cos_lat = float(xp.cos(xp.radians(center_lat)))
```
#### 3. The heavy calculation loop — `_run_point_loop` (line 1070) and `_calculate_point_sync` (line 1112)
This is where 90% of time is spent. Currently processes points one-by-one.
The GPU win comes from vectorizing the path loss calculation across ALL grid points at once.
**Strategy**: Instead of looping through points, create arrays of all distances/angles
and compute path loss for all points in one vectorized operation.
#### 4. `_calculate_bearing` (line 1402) — already vectorizable
```python
# All np.* functions here have direct CuPy equivalents
# Just replace np → xp
```
## Important Rules
1. **Always get xp at function scope**, not module scope:
```python
def my_function(self, ...):
xp = gpu_manager.get_array_module()
# use xp instead of np
```
2. **Convert GPU arrays back to CPU** before returning to non-GPU code:
```python
if hasattr(result, 'get'): # CuPy array
result = result.get() # → numpy array
```
3. **Keep np for small/scalar operations** — GPU overhead isn't worth it for single values.
Only use xp for array operations on 100+ elements.
4. **Don't break the fallback** — if CuPy isn't available, `get_array_module()` returns numpy,
so `xp.array()` etc. work identically.
5. **Test both paths** — run with GPU and verify same results as CPU.
## Testing
After changes:
```powershell
# Rebuild
cd D:\root\rfcp\backend
pyinstaller ..\installer\rfcp-server-gpu.spec --noconfirm
# Run
.\dist\rfcp-server\rfcp-server.exe
# Test calculation via frontend — watch Task Manager GPU utilization
# Should see GPU Compute spike during coverage calculation
# Time should be significantly faster than 10s for 1254 points
```
Compare before/after:
- Current (CPU): ~10s for 1254 points, 5km radius
- Expected (GPU): 1-3s for same calculation
Also test GPU diagnostics:
```
curl http://localhost:8090/api/gpu/diagnostics
```
## What NOT to Change
- Don't modify gpu_backend.py — it's working correctly
- Don't change the API endpoints or response format
- Don't remove the NumPy import — keep it for non-array operations
- Don't change propagation model math — only the array operations
- Don't change _filter_buildings_to_bbox or OSM functions — they use lists not arrays
## Success Criteria
- [ ] Coverage calculation uses GPU (visible in Task Manager)
- [ ] Calculation time reduced for 1000+ point grids
- [ ] CPU fallback still works (test by setting active_backend to cpu via API)
- [ ] Same coverage results (heatmap should look identical)
- [ ] No regression in tiled processing mode

View File

@@ -0,0 +1,181 @@
# RFCP 3.8.0 — Vectorize Per-Point Coverage Calculations
## Context
Iteration 3.7.0 added GPU precompute for distances + base path loss (Phase 2.5).
But Phase 3 (per-point loop) still runs on CPU, one point at a time across workers.
This is where 95% of time goes on Full preset (195s for 6,642 points).
Current pipeline:
```
Phase 2.5 (GPU, 0.01s): distances + base path_loss → precomputed arrays
Phase 3 (CPU, 195s): per-point terrain_loss, building_loss, reflections, vegetation
```
Goal: Vectorize the heavy per-point calculations so GPU handles them in bulk.
## Architecture
The key insight: `_calculate_point_sync` (line ~1127) does these steps per point:
1. **Terrain LOS check** — get elevation profile between site and point, check clearance
2. **Diffraction loss** — knife-edge based on Fresnel zone clearance
3. **Building obstruction** — find buildings between site and point, calculate penetration loss
4. **Materials penalty** — add loss based on building material type
5. **Dominant path analysis** — LOS vs reflection vs diffraction
6. **Street canyon** — check if point is in urban canyon
7. **Reflections** — find reflection paths off buildings (most expensive!)
8. **Vegetation loss** — check vegetation between site and point
9. **Final RSRP** — tx_power - path_loss - terrain_loss - building_loss - veg_loss + gains
## Strategy: Vectorize in Stages
NOT everything can be vectorized equally. Prioritize by time spent:
### Stage 1: Terrain LOS + Diffraction (HIGH IMPACT)
Currently: For each point, sample ~50-100 elevation values along radial path,
find min clearance, compute knife-edge diffraction.
**Vectorize**: Create 2D elevation profiles for ALL points at once.
- All points share the same site location
- For N points, create N terrain profiles (each M samples)
- Compute Fresnel clearance for all profiles vectorized
- Compute diffraction loss vectorized
```python
# Instead of per-point:
for point in grid:
profile = get_terrain_profile(site, point, num_samples=50)
clearance = min_clearance(profile)
loss = diffraction_loss(clearance, freq)
# Vectorized:
xp = gpu_manager.get_array_module()
# all_profiles shape: (N_points, M_samples)
all_profiles = get_terrain_profiles_batch(site, all_points, num_samples=50)
all_clearances = compute_clearances_batch(all_profiles, site_elev, point_elevs, distances)
all_terrain_loss = diffraction_loss_batch(all_clearances, freq)
```
### Stage 2: Building Obstruction (HIGH IMPACT)
Currently: For each point, find nearby buildings, check if they obstruct path.
**Vectorize**: Use spatial indexing but batch the geometry checks.
- Pre-compute building bounding boxes as GPU arrays
- For each point, ray-building intersection can be done as matrix operation
- Building penetration loss is simple lookup after intersection
NOTE: This is harder to vectorize because each point has different number of
nearby buildings. Options:
a) Pad to max buildings per point (wastes memory but simple)
b) Use sparse representation
c) Keep per-point but use GPU for the geometry math
Recommend option (c) initially — keep the spatial query on CPU but move
the trig/geometry calculations to GPU.
### Stage 3: Reflections (MEDIUM IMPACT, only on Full preset)
Currently: For each point with buildings, compute reflection paths.
This is the most complex calculation and hardest to vectorize.
**Approach**: Keep reflections per-point for now, but optimize the inner math
with vectorized operations.
### Stage 4: Vegetation Loss (LOW IMPACT)
Simple lookup — not worth GPU overhead.
## Implementation Plan
### Step 1: Batch terrain profiling
Add to coverage_service.py a new method:
```python
def _batch_terrain_profiles(self, site_lat, site_lon, site_elev,
grid_lats, grid_lons, grid_elevs,
distances, frequency, num_samples=50):
"""Compute terrain LOS and diffraction loss for all points at once."""
xp = gpu_manager.get_array_module()
N = len(grid_lats)
# Interpolate terrain profiles for all points
# Each profile: site → point, num_samples elevation values
# Use terrain tile data directly
# Compute Fresnel zone clearance for each profile
# Compute knife-edge diffraction loss
return terrain_losses # shape (N,)
```
### Step 2: Batch building check
Add method:
```python
def _batch_building_obstruction(self, site_lat, site_lon,
grid_lats, grid_lons,
distances, buildings_spatial_index,
all_buildings):
"""Compute building loss for all points at once."""
# For each point, query spatial index (CPU)
# Batch the geometry intersection math (GPU)
# Return losses
return building_losses # shape (N,)
```
### Step 3: Replace _run_point_loop
Instead of ProcessPool workers, do:
```python
# In calculate_coverage, after Phase 2.5:
terrain_losses = self._batch_terrain_profiles(...)
building_losses = self._batch_building_obstruction(...)
# Final RSRP is now fully vectorized:
rsrp = tx_power - precomputed_path_loss - terrain_losses - building_losses - veg_losses
# + antenna_gains + reflection_gains
```
### Step 4: Keep worker fallback
If GPU not available or for very complex calculations (reflections),
fall back to the existing per-point ProcessPool approach.
## Important Notes
1. **GPU code only in main process** — learned from 3.7.0, never import gpu_manager in workers
2. **Terrain data access** — terrain tiles are in memory, need efficient sampling for batch profiles
3. **CuPy ↔ NumPy bridge** — use `xp.asnumpy()` or `.get()` to convert back to CPU
4. **Memory** — 6,642 points × 50 terrain samples = 332,100 floats = 2.5 MB on GPU, no problem
5. **Accuracy** — results must match existing per-point calculation within 1 dB
## Testing
```powershell
cd D:\root\rfcp\backend
pyinstaller ..\installer\rfcp-server-gpu.spec --noconfirm
.\dist\rfcp-server\rfcp-server.exe
```
Compare Full preset:
- Before (3.7.0): ~195s for 6,642 points
- Target (3.8.0): <30s for same calculation
- Stretch goal: <10s
Verify accuracy:
- Run same location with GPU and CPU backend
- Compare RSRP values — should be within 1 dB
- Coverage percentages (Excellent/Good/Fair/Weak) should be very close
## What NOT to Change
- Don't modify propagation model math (Okumura-Hata, COST-231, Free-Space formulas)
- Don't change API endpoints or response format
- Don't remove the ProcessPool fallback — keep it for CPU-only mode
- Don't change OSM fetching or caching
- Don't modify the frontend
## Success Criteria
- [ ] Full preset completes in <30s (was 195s)
- [ ] Standard preset completes in <5s (was 7.2s)
- [ ] No CuPy errors in worker processes
- [ ] CPU fallback still works
- [ ] Results match within 1 dB accuracy
- [ ] GPU utilization visible in Task Manager during calculation

View File

@@ -0,0 +1,436 @@
# RFCP 3.9.0 — SRTM1 Real Terrain Data Integration
## Context
RFCP currently downloads terrain tiles from an elevation API at runtime.
This works but has limitations:
- Requires internet connection
- Unknown data source quality
- No offline capability (critical for tactical/field use)
- No control over resolution or caching
Goal: Replace with SRTM1 (30m resolution) HGT files, offline-first architecture.
## SRTM1 Data Format
HGT files are dead simple:
- 1°×1° tiles, named by southwest corner: `N48E033.hgt`
- 3601×3601 grid of signed 16-bit integers (big-endian)
- Each value = elevation in meters
- File size: exactly 25,934,402 bytes (3601 × 3601 × 2)
- Row order: north to south (first row = northernmost)
- Column order: west to east
- Adjacent tiles overlap by 1 pixel on shared edges
- Void/no-data value: -32768
Compressed (.hgt.zip): ~10-15 MB per tile typically.
## Architecture
### Tile Storage Layout
```
{app_data}/terrain/
├── srtm1/ # 30m resolution tiles
│ ├── N48E033.hgt # Uncompressed for fast access
│ ├── N48E034.hgt
│ ├── N48E035.hgt
│ └── ...
├── tile_index.json # Metadata: available tiles, checksums, dates
└── downloads/ # Temporary download staging
```
On Windows, `{app_data}` = the application's data directory.
For PyInstaller exe: `data/terrain/` relative to exe location.
The path must be configurable (environment variable or config file).
### Tile Manager (new file: `terrain_manager.py`)
```python
class SRTMTileManager:
"""Manages SRTM1 HGT tile storage, loading, and caching."""
def __init__(self, terrain_dir: str):
self.terrain_dir = Path(terrain_dir)
self.srtm1_dir = self.terrain_dir / "srtm1"
self.srtm1_dir.mkdir(parents=True, exist_ok=True)
# In-memory cache: tile_name -> numpy array
self._tile_cache: Dict[str, np.ndarray] = {}
self._max_cache_tiles = 16 # ~16 tiles = ~400 MB RAM
def get_tile_name(self, lat: float, lon: float) -> str:
"""Convert lat/lon to SRTM tile name."""
# Floor to get southwest corner
lat_int = int(lat) if lat >= 0 else int(lat) - 1
lon_int = int(lon) if lon >= 0 else int(lon) - 1
lat_prefix = "N" if lat_int >= 0 else "S"
lon_prefix = "E" if lon_int >= 0 else "W"
return f"{lat_prefix}{abs(lat_int):02d}{lon_prefix}{abs(lon_int):03d}"
def get_required_tiles(self, center_lat, center_lon, radius_km) -> List[str]:
"""Determine which tiles are needed for a coverage calculation."""
# Calculate bounding box from center + radius
# Return list of tile names
def has_tile(self, tile_name: str) -> bool:
"""Check if tile exists locally."""
return (self.srtm1_dir / f"{tile_name}.hgt").exists()
def load_tile(self, tile_name: str) -> Optional[np.ndarray]:
"""Load tile from disk into memory. Returns 3601x3601 int16 array."""
if tile_name in self._tile_cache:
return self._tile_cache[tile_name]
hgt_path = self.srtm1_dir / f"{tile_name}.hgt"
if not hgt_path.exists():
return None
# Read raw HGT: big-endian signed 16-bit
data = np.fromfile(str(hgt_path), dtype='>i2')
tile = data.reshape((3601, 3601))
# Replace void values
tile = tile.astype(np.float32)
tile[tile == -32768] = np.nan
# Cache management (FIFO eviction: drop the earliest-inserted tile when full;
# true LRU would re-order entries on access)
if len(self._tile_cache) >= self._max_cache_tiles:
oldest_key = next(iter(self._tile_cache))
del self._tile_cache[oldest_key]
self._tile_cache[tile_name] = tile
return tile
def get_elevation(self, lat: float, lon: float) -> Optional[float]:
"""Get elevation at a single point with bilinear interpolation."""
tile_name = self.get_tile_name(lat, lon)
tile = self.load_tile(tile_name)
if tile is None:
return None
return self._bilinear_sample(tile, lat, lon)
def get_elevations_batch(self, lats: np.ndarray, lons: np.ndarray) -> np.ndarray:
"""Get elevations for array of points. Vectorized."""
# Group points by tile
# Load needed tiles
# Vectorized bilinear interpolation per tile
# Return array of elevations
async def download_tile(self, tile_name: str) -> bool:
"""Download a single tile from remote source (if online)."""
# Try multiple sources in order:
# 1. Own server (future: UMTC sync endpoint)
# 2. srtm.fasma.org (no auth required)
# 3. viewfinderpanoramas.org (no auth, void-filled)
# Returns True if successful
def get_missing_tiles(self, center_lat, center_lon, radius_km) -> List[str]:
"""Check which needed tiles are not available locally."""
required = self.get_required_tiles(center_lat, center_lon, radius_km)
return [t for t in required if not self.has_tile(t)]
```
### Bilinear Interpolation (CRITICAL for accuracy)
Current system uses nearest-neighbor (pick closest grid cell).
SRTM1 at 30m means nearest-neighbor can have 15m positional error.
Bilinear interpolation reduces this to sub-meter accuracy.
```python
def _bilinear_sample(self, tile: np.ndarray, lat: float, lon: float) -> float:
"""Sample elevation with bilinear interpolation."""
# Tile southwest corner
lat_int = int(lat) if lat >= 0 else int(lat) - 1
lon_int = int(lon) if lon >= 0 else int(lon) - 1
# Fractional position within tile (0.0 to 1.0)
lat_frac = lat - lat_int # 0 = south edge, 1 = north edge
lon_frac = lon - lon_int # 0 = west edge, 1 = east edge
# Convert to row/col (note: rows go north to south!)
row_exact = (1.0 - lat_frac) * 3600.0 # 0 = north, 3600 = south
col_exact = lon_frac * 3600.0 # 0 = west, 3600 = east
# Four surrounding grid points
r0 = int(row_exact)
c0 = int(col_exact)
r1 = min(r0 + 1, 3600)
c1 = min(c0 + 1, 3600)
# Fractional position between grid points
dr = row_exact - r0
dc = col_exact - c0
# Bilinear interpolation
z00 = tile[r0, c0]
z01 = tile[r0, c1]
z10 = tile[r1, c0]
z11 = tile[r1, c1]
# Handle NaN (void) values
if np.isnan(z00) or np.isnan(z01) or np.isnan(z10) or np.isnan(z11):
# Fall back to the first non-NaN corner (simple approximation, not strictly nearest)
valid = [(z00, 0, 0), (z01, 0, 1), (z10, 1, 0), (z11, 1, 1)]
valid = [(z, r, c) for z, r, c in valid if not np.isnan(z)]
return valid[0][0] if valid else 0.0
elevation = (z00 * (1 - dr) * (1 - dc) +
z01 * (1 - dr) * dc +
z10 * dr * (1 - dc) +
z11 * dr * dc)
return float(elevation)
```
### Vectorized Batch Elevation (for GPU pipeline)
This replaces the current `_batch_elevation_lookup` in gpu_service.py.
Must handle multi-tile seamlessly.
```python
def get_elevations_batch(self, lats: np.ndarray, lons: np.ndarray) -> np.ndarray:
"""Vectorized elevation lookup with bilinear interpolation.
Handles points spanning multiple tiles efficiently.
Groups points by tile, processes each tile with full NumPy vectorization.
"""
elevations = np.zeros(len(lats), dtype=np.float32)
# Compute tile indices for each point
# np.floor already rounds toward negative infinity, so no sign branch is needed
lat_ints = np.floor(lats).astype(int)
lon_ints = np.floor(lons).astype(int)
# Group by tile
tile_keys = lat_ints * 1000 + lon_ints # unique key per tile
unique_keys = np.unique(tile_keys)
for key in unique_keys:
mask = tile_keys == key
lat_int = int(key) // 1000
lon_int = int(key) % 1000
if lon_int > 500:  # negative longitude wrapped by Python's floor-mod
lon_int -= 1000
lat_int += 1  # floor division borrowed 1 from the latitude part; restore it
tile_name = self._make_tile_name(lat_int, lon_int)
tile = self.load_tile(tile_name)
if tile is None:
elevations[mask] = 0.0 # no data
continue
# Vectorized bilinear for all points in this tile
tile_lats = lats[mask]
tile_lons = lons[mask]
lat_frac = tile_lats - lat_int
lon_frac = tile_lons - lon_int
row_exact = (1.0 - lat_frac) * 3600.0
col_exact = lon_frac * 3600.0
r0 = np.clip(row_exact.astype(int), 0, 3599)
c0 = np.clip(col_exact.astype(int), 0, 3599)
r1 = np.clip(r0 + 1, 0, 3600)
c1 = np.clip(c0 + 1, 0, 3600)
dr = row_exact - r0
dc = col_exact - c0
z00 = tile[r0, c0]
z01 = tile[r0, c1]
z10 = tile[r1, c0]
z11 = tile[r1, c1]
result = (z00 * (1 - dr) * (1 - dc) +
z01 * (1 - dr) * dc +
z10 * dr * (1 - dc) +
z11 * dr * dc)
# Handle NaN voids
nan_mask = np.isnan(result)
if nan_mask.any():
result[nan_mask] = 0.0
elevations[mask] = result
return elevations
```
## Integration Points
### 1. Replace terrain_service.py elevation lookup
Current terrain service downloads elevation data from an API.
Replace with SRTMTileManager calls:
```python
# OLD:
elevation = await self.terrain_service.get_elevation(lat, lon)
# NEW:
elevation = self.tile_manager.get_elevation(lat, lon)
# Or for batch (GPU pipeline Phase 2.6):
elevations = self.tile_manager.get_elevations_batch(lats_array, lons_array)
```
### 2. Replace _batch_elevation_lookup in gpu_service.py
The vectorized elevation lookup in gpu_service.py currently loads tiles
and does nearest-neighbor sampling. Replace with tile_manager.get_elevations_batch()
which does bilinear interpolation.
### 3. Coverage service pre-check
Before starting calculation, check if all needed tiles are available:
```python
missing = self.tile_manager.get_missing_tiles(site_lat, site_lon, radius_km)
if missing:
if has_internet:
# Try to download missing tiles
for tile_name in missing:
await self.tile_manager.download_tile(tile_name)
else:
# Return warning to frontend
return {"warning": f"Missing terrain tiles: {missing}. Using flat terrain."}
```
### 4. Frontend notification
When tiles are missing, show a warning banner:
"⚠ Terrain data not available for this area. Coverage accuracy reduced."
When tiles are being downloaded:
"⬇ Downloading terrain data... (N48E033.hgt, 12.5 MB)"
### 5. Terrain Profile Viewer
The terrain profile viewer should use the same tile_manager
for consistent elevation data. With bilinear interpolation,
profiles will be much smoother and more accurate.
## Download Sources (Priority Order)
For auto-download when online:
1. **srtm.fasma.org** (no auth, direct HGT.zip download)
URL: `https://srtm.fasma.org/N48E033.SRTMGL1.hgt.zip`
- Free, no registration
- SRTM1 (30m) data
- May be slow or unreliable
2. **viewfinderpanoramas.org** (no auth, void-filled data)
URL: `http://viewfinderpanoramas.org/dem1/{region}/{tile}.hgt.zip`
- Free, no registration
- Void areas filled from topographic maps
- Better quality in mountainous areas
- File naming might differ by region
3. **Future: UMTC sync server**
URL: `https://rfcp.{your-domain}/api/terrain/tiles/{tile_name}.hgt`
- Self-hosted on your infrastructure
- Accessible via WireGuard mesh
- Can pre-populate with full Ukraine dataset
## Offline Bundle Strategy
For installer / field deployment:
### Option A: Region packs
Pre-package tiles by operational area:
- `terrain-dnipro.zip` — 4 tiles around Dnipro area (~100 MB)
- `terrain-ukraine-east.zip` — ~50 tiles, eastern Ukraine (~1.2 GB)
- `terrain-ukraine-full.zip` — ~171 tiles, all Ukraine (~4.3 GB)
### Option B: On-demand with cache
Ship empty, download tiles as needed on first calculation.
Cache permanently. Works well for development/testing.
### Option C: Live USB bundle
For tactical deployment, include full Ukraine terrain data
on the live USB alongside the application. 4.3 GB is acceptable
for a USB drive.
Recommend: **Option B for now** (development), **Option C for deployment**.
## File Changes
### New Files
- `backend/app/services/terrain_manager.py` — SRTMTileManager class
### Modified Files
- `backend/app/services/terrain_service.py` — Replace API calls with tile_manager
- `backend/app/services/gpu_service.py` — Replace _batch_elevation_lookup
- `backend/app/services/coverage_service.py` — Add missing tile pre-check
- `backend/app/main.py` — Initialize tile_manager on startup
### Config
- Add `TERRAIN_DIR` environment variable / config option
- Default: `./data/terrain` relative to backend exe
## Testing
```powershell
# Build and test
cd D:\root\rfcp\backend
pyinstaller ..\installer\rfcp-server-gpu.spec --noconfirm
.\dist\rfcp-server\rfcp-server.exe
```
### Test 1: First run (no tiles cached)
- Start app, trigger calculation
- Should attempt to download required tile(s)
- If online: downloads, caches, calculates
- If offline: warning, flat terrain fallback
### Test 2: Cached tiles
- Run same calculation again
- Tile loaded from disk cache, no download
- Should be fast (tile load from disk < 100ms)
### Test 3: Accuracy comparison
- Compare elevation at known points (e.g., Dnipro city center)
- Cross-reference with Google Earth elevation
- Expected accuracy: ±5m horizontal, ±16m vertical (SRTM spec)
### Test 4: Multi-tile calculation
- Set radius to 50km+ to span multiple tiles
- Verify seamless stitching at tile boundaries
- No elevation jumps or artifacts at edges
### Test 5: Terrain profile
- Draw terrain profile across tile boundary
- Should be smooth, no discontinuity
- Compare with Google Earth profile for same path
### Test 6: Performance
- Tile load time from disk: <100ms
- Batch elevation lookup (6000 points): <50ms
- Should not regress overall calculation time
- Memory: ~25 MB per loaded tile, max 16 tiles = 400 MB
## What NOT to Change
- Don't modify GPU pipeline architecture (Phase 2.5/2.6/2.7)
- Don't change propagation model math
- Don't change API endpoints or response format
- Don't change frontend map or heatmap rendering
- Don't change OSM building/vegetation fetching
- Don't change PyInstaller build process (just add data dir)
## Success Criteria
- [ ] SRTM1 tiles load correctly (3601×3601, 30m resolution)
- [ ] Bilinear interpolation working (smoother than nearest-neighbor)
- [ ] Offline mode works with pre-cached tiles
- [ ] Auto-download works when online
- [ ] Missing tile warning shown to user
- [ ] Multi-tile seamless stitching
- [ ] Terrain profile accuracy matches Google Earth within 20m
- [ ] No performance regression (calculation time same or faster)
- [ ] Tile cache directory configurable

View File

@@ -0,0 +1,246 @@
# RFCP — Iteration 3.9.1: Terra Tile Server Integration
## Overview
Connect terrain_service.py to our SRTM tile server (terra.eliah.one) as primary download source, add terrain status API endpoint, and create a bulk pre-download utility. The `data/terrain/` directory already exists.
## Context
- terra.eliah.one is live and serving tiles via Caddy file_server
- SRTM3 (90m): 187 tiles, 515 MB — full Ukraine coverage (N44-N51, E018-E041)
- SRTM1 (30m): 160 tiles, 3.9 GB — same coverage area
- terrain_service.py already has bilinear interpolation (3.9.0)
- Backend runs on Windows with RTX 4060, tiles stored locally in `data/terrain/`
- Server is download source, NOT used during realtime calculations
## Changes Required
### 1. Update SRTM_SOURCES in terrain_service.py
**File:** `backend/app/services/terrain_service.py`
Replace current SRTM_SOURCES (lines 22-25):
```python
SRTM_SOURCES = [
"https://elevation-tiles-prod.s3.amazonaws.com/skadi/{lat_dir}/{tile_name}.hgt.gz",
"https://s3.amazonaws.com/elevation-tiles-prod/skadi/{lat_dir}/{tile_name}.hgt.gz",
]
```
With prioritized source list:
```python
SRTM_SOURCES = [
# Our tile server — SRTM1 (30m) preferred, uncompressed
{
"url": "https://terra.eliah.one/srtm1/{tile_name}.hgt",
"compressed": False,
"resolution": "srtm1",
},
# Our tile server — SRTM3 (90m) fallback
{
"url": "https://terra.eliah.one/srtm3/{tile_name}.hgt",
"compressed": False,
"resolution": "srtm3",
},
# Public AWS mirror — SRTM1, gzip compressed
{
"url": "https://elevation-tiles-prod.s3.amazonaws.com/skadi/{lat_dir}/{tile_name}.hgt.gz",
"compressed": True,
"resolution": "srtm1",
},
]
```
Update `download_tile()` to handle the new source format:
```python
async def download_tile(self, tile_name: str) -> bool:
"""Download SRTM tile from configured sources, preferring highest resolution."""
tile_path = self.get_tile_path(tile_name)
if tile_path.exists():
return True
lat_dir = tile_name[:3] # e.g., "N48"
async with httpx.AsyncClient(timeout=60.0, follow_redirects=True) as client:
for source in self.SRTM_SOURCES:
url = source["url"].format(lat_dir=lat_dir, tile_name=tile_name)
try:
response = await client.get(url)
if response.status_code == 200:
data = response.content
# Skip empty responses
if len(data) < 1000:
continue
if source["compressed"]:
if url.endswith('.gz'):
data = gzip.decompress(data)
elif url.endswith('.zip'):
with zipfile.ZipFile(io.BytesIO(data)) as zf:
for name in zf.namelist():
if name.endswith('.hgt'):
data = zf.read(name)
break
# Validate tile size
if len(data) not in (3601 * 3601 * 2, 1201 * 1201 * 2):
print(f"[Terrain] Invalid tile size {len(data)} from {url}")
continue
tile_path.write_bytes(data)
res = source["resolution"]
size_mb = len(data) / 1048576
print(f"[Terrain] Downloaded {tile_name} ({res}, {size_mb:.1f} MB)")
return True
except Exception as e:
print(f"[Terrain] Failed from {url}: {e}")
continue
print(f"[Terrain] Could not download {tile_name} from any source")
return False
```
### 2. Add Terrain Status API Endpoint
**File:** `backend/app/api/routes.py` (or wherever API routes are defined)
Add a new endpoint:
```python
@router.get("/api/terrain/status")
async def terrain_status():
"""Return terrain data availability info."""
from app.services.terrain_service import terrain_service
cached_tiles = terrain_service.get_cached_tiles()
cache_size = terrain_service.get_cache_size_mb()
# Categorize by resolution
srtm1_tiles = [t for t in cached_tiles
if (terrain_service.terrain_path / f"{t}.hgt").stat().st_size == 3601 * 3601 * 2]
srtm3_tiles = [t for t in cached_tiles if t not in srtm1_tiles]
return {
"total_tiles": len(cached_tiles),
"srtm1": {
"count": len(srtm1_tiles),
"resolution_m": 30,
"tiles": sorted(srtm1_tiles),
},
"srtm3": {
"count": len(srtm3_tiles),
"resolution_m": 90,
"tiles": sorted(srtm3_tiles),
},
"cache_size_mb": round(cache_size, 1),
"memory_cached": len(terrain_service._tile_cache),
"terra_server": "https://terra.eliah.one",
}
```
### 3. Add Bulk Pre-Download Endpoint
**File:** Same routes file
```python
@router.post("/api/terrain/download")
async def terrain_download(request: dict):
"""Pre-download tiles for a region.
Body: {"center_lat": 48.46, "center_lon": 35.04, "radius_km": 50}
Or: {"tiles": ["N48E034", "N48E035", "N47E034", "N47E035"]}
"""
from app.services.terrain_service import terrain_service
if "tiles" in request:
tile_list = request["tiles"]
else:
center_lat = request.get("center_lat", 48.46)
center_lon = request.get("center_lon", 35.04)
radius_km = request.get("radius_km", 50)
tile_list = terrain_service.get_required_tiles(center_lat, center_lon, radius_km)
missing = [t for t in tile_list if not terrain_service.get_tile_path(t).exists()]
if not missing:
return {"status": "ok", "message": "All tiles already cached", "count": len(tile_list)}
# Download missing tiles
downloaded = []
failed = []
for tile_name in missing:
success = await terrain_service.download_tile(tile_name)
if success:
downloaded.append(tile_name)
else:
failed.append(tile_name)
return {
"status": "ok",
"required": len(tile_list),
"already_cached": len(tile_list) - len(missing),
"downloaded": downloaded,
"failed": failed,
}
```
### 4. Add Tile Index Endpoint
**File:** Same routes file
```python
@router.get("/api/terrain/index")
async def terrain_index():
"""Fetch tile index from terra server."""
import httpx
try:
async with httpx.AsyncClient(timeout=10.0) as client:
resp = await client.get("https://terra.eliah.one/api/index")
if resp.status_code == 200:
return resp.json()
except Exception:
pass
return {"error": "Could not reach terra.eliah.one", "offline": True}
```
## Testing Checklist
- [ ] `GET /api/terrain/status` returns tile counts and sizes
- [ ] `POST /api/terrain/download {"center_lat": 48.46, "center_lon": 35.04, "radius_km": 10}` downloads missing tiles from terra.eliah.one
- [ ] Tiles downloaded from terra are valid HGT format (2,884,802 or 25,934,402 bytes)
- [ ] SRTM1 is preferred over SRTM3 when downloading
- [ ] Existing tiles are not re-downloaded
- [ ] Coverage calculation works with terrain data (test with Dnipro coordinates)
- [ ] `GET /api/terrain/index` returns terra server tile list
## Build & Deploy
```bash
cd D:\root\rfcp\backend
# No build needed — Python backend, just restart
# Kill existing uvicorn and restart:
python -m uvicorn app.main:app --host 0.0.0.0 --port 8000 --reload
```
## Commit Message
```
feat(terrain): integrate terra.eliah.one tile server
- Add terra.eliah.one as primary SRTM source (SRTM1 30m preferred)
- Keep AWS S3 as fallback source
- Add /api/terrain/status endpoint (tile inventory)
- Add /api/terrain/download endpoint (bulk pre-download)
- Add /api/terrain/index endpoint (terra server index)
- Validate tile size before saving
- Add follow_redirects=True to httpx client
```
## Success Criteria
1. terrain_service downloads from terra.eliah.one first
2. /api/terrain/status shows correct tile counts by resolution
3. /api/terrain/download fetches tiles for any Ukrainian coordinate
4. Offline mode works — no downloads attempted if tiles exist locally
5. Coverage calculation uses real elevation data instead of flat terrain

View File

@@ -0,0 +1,656 @@
# RFCP Dependencies & Installer Specification
## Overview
All dependencies needed for RFCP to work out of the box, including GPU acceleration.
The installer must handle everything — user should NOT need to run pip manually.
---
## Python Dependencies
### Core (MUST have)
```txt
# requirements.txt
# Web framework
fastapi>=0.104.0
uvicorn[standard]>=0.24.0
websockets>=12.0
# Scientific computing
numpy>=1.24.0
scipy>=1.11.0
# Geospatial
pyproj>=3.6.0 # coordinate transformations
shapely>=2.0.0 # geometry operations (boundary contours)
# Terrain data
rasterio>=1.3.0 # GeoTIFF reading (optional, for custom terrain)
# Note: SRTM .hgt files read with numpy directly
# OSM data
requests>=2.31.0 # HTTP client for OSM Overpass API
geopy>=2.4.0 # distance calculations
# Database
# sqlite3 is built-in Python — no install needed
# Utilities
orjson>=3.9.0 # fast JSON (optional, faster API responses)
pydantic>=2.0.0 # data validation (FastAPI dependency)
```
### GPU Acceleration (OPTIONAL — auto-detected)
```txt
# requirements-gpu-nvidia.txt
cupy-cuda12x>=12.0.0 # For CUDA 12.x (RTX 30xx, 40xx)
# OR
cupy-cuda11x>=11.0.0 # For CUDA 11.x (older cards)
# requirements-gpu-opencl.txt
pyopencl>=2023.1 # For ANY GPU (Intel, AMD, NVIDIA)
```
### Development / Testing
```txt
# requirements-dev.txt
pytest>=7.0.0
pytest-asyncio>=0.21.0
httpx>=0.25.0 # async test client
```
---
## System Dependencies
### NVIDIA GPU Support
```
REQUIRED: NVIDIA Driver (comes with GPU)
REQUIRED: CUDA Toolkit 12.x (for CuPy)
Check if installed:
nvidia-smi → shows driver version
nvcc --version → shows CUDA toolkit version
If missing CUDA toolkit:
Download from: https://developer.nvidia.com/cuda-downloads
Select: Windows > x86_64 > 11/10 > exe (local)
Size: ~3 GB
Alternative: cupy auto-installs CUDA runtime!
pip install cupy-cuda12x
This bundles CUDA runtime (~700 MB) — no separate install needed
```
### Intel GPU Support (OpenCL)
```
REQUIRED: Intel GPU Driver (usually pre-installed)
REQUIRED: Intel OpenCL Runtime
Check if installed:
Open Device Manager → Display Adapters → Intel UHD/Iris
For OpenCL:
Download Intel GPU Computing Runtime:
https://github.com/intel/compute-runtime/releases
Or: Intel oneAPI Base Toolkit (includes OpenCL)
https://www.intel.com/content/www/us/en/developer/tools/oneapi/base-toolkit-download.html
```
### AMD GPU Support (OpenCL)
```
REQUIRED: AMD Adrenalin Driver (includes OpenCL)
Download from: https://www.amd.com/en/support
```
---
## Node.js / Frontend Dependencies
### System Requirements
```
Node.js >= 18.0.0 (LTS recommended)
npm >= 9.0.0
Check:
node --version
npm --version
```
### Frontend packages (managed by npm)
```json
// package.json — key dependencies
{
"dependencies": {
"react": "^18.2.0",
"react-dom": "^18.2.0",
"leaflet": "^1.9.4",
"react-leaflet": "^4.2.0",
"recharts": "^2.8.0",
"zustand": "^4.4.0",
"lucide-react": "^0.294.0"
},
"devDependencies": {
"vite": "^5.0.0",
"typescript": "^5.3.0",
"tailwindcss": "^3.4.0",
"@types/leaflet": "^1.9.0"
}
}
```
---
## Installer Script
### Windows Installer (NSIS or Electron-Builder)
```python
# install_rfcp.py — Python-based installer/setup script
import subprocess
import sys
import platform
import os
import shutil
import json
def check_python():
"""Verify Python 3.10+ is available."""
version = sys.version_info
if (version.major, version.minor) < (3, 10):
print(f"❌ Python 3.10+ required, found {version.major}.{version.minor}")
return False
print(f"✅ Python {version.major}.{version.minor}.{version.micro}")
return True
def check_node():
"""Verify Node.js 18+ is available."""
try:
result = subprocess.run(["node", "--version"], capture_output=True, text=True)
version = result.stdout.strip().lstrip('v')
major = int(version.split('.')[0])
if major < 18:
print(f"❌ Node.js 18+ required, found {version}")
return False
print(f"✅ Node.js {version}")
return True
except FileNotFoundError:
print("❌ Node.js not found")
return False
def detect_gpu():
"""Detect available GPU hardware."""
gpus = {
"nvidia": False,
"nvidia_name": "",
"intel": False,
"intel_name": "",
"amd": False,
"amd_name": ""
}
# Check NVIDIA
try:
result = subprocess.run(
["nvidia-smi", "--query-gpu=name,driver_version,memory.total",
"--format=csv,noheader"],
capture_output=True, text=True, timeout=5
)
if result.returncode == 0:
info = result.stdout.strip()
gpus["nvidia"] = True
gpus["nvidia_name"] = info.split(",")[0].strip()
print(f"✅ NVIDIA GPU: {info}")
except (FileNotFoundError, subprocess.TimeoutExpired):
print(" No NVIDIA GPU detected")
# Check Intel/AMD via WMI (Windows)
if platform.system() == "Windows":
try:
result = subprocess.run(
["wmic", "path", "win32_videocontroller", "get",
"name,adapterram,driverversion", "/format:csv"],
capture_output=True, text=True, timeout=5
)
for line in result.stdout.strip().split('\n'):
if 'Intel' in line:
gpus["intel"] = True
gpus["intel_name"] = [x for x in line.split(',') if 'Intel' in x][0]
print(f"✅ Intel GPU: {gpus['intel_name']}")
elif 'AMD' in line or 'Radeon' in line:
gpus["amd"] = True
gpus["amd_name"] = [x for x in line.split(',') if 'AMD' in x or 'Radeon' in x][0]
print(f"✅ AMD GPU: {gpus['amd_name']}")
except Exception:
pass
return gpus
def install_core_dependencies():
    """Install core Python dependencies from requirements.txt via pip."""
    print("\n📦 Installing core dependencies...")
    pip_cmd = [
        sys.executable, "-m", "pip", "install", "-r", "requirements.txt",
        "--quiet", "--no-warn-script-location",
    ]
    # check=True aborts the installer immediately if pip fails.
    subprocess.run(pip_cmd, check=True)
    print("✅ Core dependencies installed")
def install_gpu_dependencies(gpus: dict):
    """Install GPU-specific dependencies based on detected hardware.

    Args:
        gpus: detection dict produced by detect_gpu().

    Returns:
        bool: True when at least one GPU backend was installed.
    """
    print("\n🎮 Setting up GPU acceleration...")

    def _pip_install(package: str, timeout: int) -> None:
        # Raises CalledProcessError / TimeoutExpired on failure.
        subprocess.run(
            [sys.executable, "-m", "pip", "install", package,
             "--quiet", "--no-warn-script-location"],
            check=True, timeout=timeout,
        )

    gpu_installed = False
    # NVIDIA — install CuPy (bundles the CUDA runtime).
    if gpus["nvidia"]:
        print(f" Installing CuPy for {gpus['nvidia_name']}...")
        try:
            _pip_install("cupy-cuda12x", 300)  # CUDA 12 first (newer cards)
            print(f" ✅ CuPy (CUDA 12) installed for {gpus['nvidia_name']}")
            gpu_installed = True
        except (subprocess.CalledProcessError, subprocess.TimeoutExpired):
            try:
                _pip_install("cupy-cuda11x", 300)  # fall back to CUDA 11
                print(f" ✅ CuPy (CUDA 11) installed for {gpus['nvidia_name']}")
                gpu_installed = True
            except Exception as e:
                print(f" ⚠️ CuPy installation failed: {e}")
                print(f" 💡 Manual install: pip install cupy-cuda12x")
    # Intel/AMD — install PyOpenCL.
    if gpus["intel"] or gpus["amd"]:
        gpu_name = gpus["intel_name"] or gpus["amd_name"]
        print(f" Installing PyOpenCL for {gpu_name}...")
        try:
            _pip_install("pyopencl", 120)
            print(f" ✅ PyOpenCL installed for {gpu_name}")
            gpu_installed = True
        except Exception as e:
            print(f" ⚠️ PyOpenCL installation failed: {e}")
            print(f" 💡 Manual install: pip install pyopencl")
    if not gpu_installed:
        print(" No GPU acceleration available — using CPU (NumPy)")
        print(" 💡 This is fine! GPU just makes large calculations faster.")
    return gpu_installed
def install_frontend():
    """Install frontend dependencies and build the production bundle."""
    print("\n🌐 Setting up frontend...")
    frontend_dir = os.path.join(os.path.dirname(__file__), "frontend")
    # Guard clause: nothing to build without a package.json.
    if not os.path.exists(os.path.join(frontend_dir, "package.json")):
        print("⚠️ Frontend directory not found")
        return
    subprocess.run(["npm", "install"], cwd=frontend_dir, check=True)
    subprocess.run(["npm", "run", "build"], cwd=frontend_dir, check=True)
    print("✅ Frontend built")
def download_terrain_data():
    """Pre-download SRTM terrain tiles for Ukraine."""
    print("\n🏔️ Checking terrain data...")
    cache_dir = os.path.expanduser("~/.rfcp/terrain")
    os.makedirs(cache_dir, exist_ok=True)
    # Ukraine bounding box: lat 44-53, lon 22-41.
    # SRTM tiles needed for typical use:
    required_tiles = [
        # Lviv oblast area (common test area)
        "N49E025", "N49E024", "N49E026",
        "N50E025", "N50E024", "N50E026",
        # Dnipro area
        "N48E034", "N48E035",
        "N49E034", "N49E035",
    ]
    # Tile names already present in the cache, without the .hgt suffix.
    cached = [
        name.replace(".hgt", "")
        for name in os.listdir(cache_dir)
        if name.endswith(".hgt")
    ]
    missing = [tile for tile in required_tiles if tile not in cached]
    if missing:
        print(f" {len(missing)} terrain tiles needed (auto-download on first use)")
    else:
        print(f"{len(cached)} terrain tiles cached")
def create_launcher():
    """Create desktop shortcut / launcher script.

    On Windows a RFCP.bat launcher is written next to this script;
    on other platforms this is a no-op.  Always returns True.
    """
    print("\n🚀 Creating launcher...")
    if platform.system() == "Windows":
        script_dir = os.path.dirname(__file__)
        launcher = os.path.join(script_dir, "RFCP.bat")
        lines = [
            '@echo off\n',
            'title RFCP - RF Coverage Planner\n',
            'echo Starting RFCP...\n',
            f'cd /d "{script_dir}"\n',
            f'"{sys.executable}" -m uvicorn backend.app.main:app --host 0.0.0.0 --port 8888\n',
        ]
        with open(launcher, 'w') as f:
            f.writelines(lines)
        print(f" ✅ Launcher created: {launcher}")
    return True
def verify_installation():
    """Run quick verification tests.

    Imports each dependency and records a status line per check.
    GPU backends (CuPy, PyOpenCL) are optional: their absence is
    reported but never fails verification.

    Returns:
        bool: True when no hard failure ("❌") was recorded.
    """
    print("\n🔍 Verifying installation...")
    checks = []
    # Check core imports
    try:
        import numpy as np
        checks.append(f"✅ NumPy {np.__version__}")
    except ImportError:
        checks.append("❌ NumPy missing")
    try:
        import scipy
        checks.append(f"✅ SciPy {scipy.__version__}")
    except ImportError:
        checks.append("❌ SciPy missing")
    try:
        import fastapi
        checks.append(f"✅ FastAPI {fastapi.__version__}")
    except ImportError:
        checks.append("❌ FastAPI missing")
    try:
        import shapely
        checks.append(f"✅ Shapely {shapely.__version__}")
    except ImportError:
        checks.append("⚠️ Shapely missing (boundary features disabled)")
    # Check GPU (optional — any error is downgraded to a warning)
    try:
        import cupy as cp
        device = cp.cuda.Device(0)
        checks.append(f"✅ CuPy → {device.name} ({device.mem_info[1]//1024//1024} MB)")
    except ImportError:
        checks.append(" CuPy not available")
    except Exception as e:
        checks.append(f"⚠️ CuPy error: {e}")
    try:
        import pyopencl as cl
        devices = []
        for p in cl.get_platforms():
            for d in p.get_devices():
                devices.append(d.name)
        checks.append(f"✅ PyOpenCL → {', '.join(devices)}")
    except ImportError:
        checks.append(" PyOpenCL not available")
    except Exception as e:
        checks.append(f"⚠️ PyOpenCL error: {e}")
    for check in checks:
        print(f" {check}")
    # BUG FIX: the previous test was `"" not in c`, which is always
    # False (the empty string is a substring of every string), so this
    # function could never return True.  Fail only on hard "❌" entries;
    # warnings and missing optional GPU backends do not fail the check.
    return all("❌" not in c for c in checks)
def main():
    """Main installer entry point.

    Runs the full install pipeline: prerequisite checks, GPU detection,
    dependency installation, frontend build, terrain-cache check,
    launcher creation, and final verification with a summary banner.
    """
    banner = "=" * 60
    print(banner)
    print(" RFCP — RF Coverage Planner — Installer")
    print(banner)
    print()
    # Step 1: prerequisites — Python is mandatory, Node.js is advisory.
    print("📋 Checking prerequisites...")
    if not check_python():
        sys.exit(1)
    check_node()
    # Step 2: hardware detection.
    gpus = detect_gpu()
    # Steps 3-6: install dependencies and prepare the first launch.
    install_core_dependencies()
    install_gpu_dependencies(gpus)
    install_frontend()
    download_terrain_data()
    create_launcher()
    # Step 7: verify the result.
    print()
    success = verify_installation()
    # Summary banner.
    print()
    print(banner)
    if not success:
        print(" ⚠️ Installation completed with warnings")
        print(" Some features may be limited")
    else:
        print(" ✅ RFCP installed successfully!")
        print()
        print(" To start RFCP:")
        print(" python -m uvicorn backend.app.main:app --port 8888")
        print(" Then open: http://localhost:8888")
        print()
        if gpus["nvidia"]:
            print(f" 🎮 GPU: {gpus['nvidia_name']} (CUDA)")
        elif gpus["intel"] or gpus["amd"]:
            gpu_name = gpus["intel_name"] or gpus["amd_name"]
            print(f" 🎮 GPU: {gpu_name} (OpenCL)")
        else:
            print(" 💻 Mode: CPU only")
    print(banner)
# Run the installer only when executed directly, not when imported.
if __name__ == "__main__":
    main()
```
---
## Electron-Builder / NSIS Packaging
### For .exe Installer
```yaml
# electron-builder.yml
appId: com.rfcp.coverage-planner
productName: "RFCP - RF Coverage Planner"
copyright: "RFCP 2026"
directories:
output: dist
buildResources: build
files:
- "backend/**/*"
- "frontend/dist/**/*"
- "requirements.txt"
- "install_rfcp.py"
- "!**/*.pyc"
- "!**/node_modules/**"
- "!**/venv/**"
extraResources:
- from: "python-embedded/"
to: "python/"
- from: "terrain-data/"
to: "terrain/"
win:
target:
- target: nsis
arch: [x64]
icon: "build/icon.ico"
nsis:
oneClick: false
allowToChangeInstallationDirectory: true
installerIcon: "build/icon.ico"
license: "LICENSE.md"
# Custom NSIS script for GPU detection
include: "build/gpu-detect.nsh"
# Install steps:
# 1. Extract files
# 2. Run install_rfcp.py (detects GPU, installs deps)
# 3. Create Start Menu shortcuts
# 4. Create Desktop shortcut
```
### Portable Version (.zip)
```
RFCP-Portable/
├── RFCP.bat # Main launcher
├── install.bat # First-time setup
├── backend/
│ ├── app/
│ │ ├── main.py
│ │ ├── api/
│ │ ├── services/
│ │ └── models/
│ └── requirements.txt
├── frontend/
│ └── dist/ # Pre-built frontend
├── python/ # Embedded Python (optional)
│ ├── python.exe
│ └── Lib/
├── terrain/ # Pre-cached .hgt files
│ ├── N49E025.hgt
│ └── ...
├── data/
│ ├── osm_cache.db # SQLite cache (created on first run)
│ └── config.json # User settings
└── README.md
```
### install.bat (First-Time Setup)
```batch
@echo off
title RFCP - First Time Setup
echo ============================================
echo RFCP - RF Coverage Planner - Setup
echo ============================================
echo.
REM Check if Python exists
python --version >nul 2>&1
if errorlevel 1 (
echo ERROR: Python not found!
echo Please install Python 3.10+ from python.org
pause
exit /b 1
)
REM Run installer
python install_rfcp.py
echo.
echo Setup complete! Run RFCP.bat to start.
pause
```
### RFCP.bat (Launcher)
```batch
@echo off
title RFCP - RF Coverage Planner
cd /d "%~dp0"
REM Check if installed
if not exist "backend\app\main.py" (
echo ERROR: RFCP not found. Run install.bat first.
pause
exit /b 1
)
echo Starting RFCP...
echo Open http://localhost:8888 in your browser
echo Press Ctrl+C to stop
echo.
python -m uvicorn backend.app.main:app --host 0.0.0.0 --port 8888
```
---
## Dependency Size Estimates
| Component | Size |
|-----------|------|
| Python (embedded) | ~30 MB |
| Core pip packages | ~80 MB |
| CuPy + CUDA runtime | ~700 MB |
| PyOpenCL | ~15 MB |
| Frontend (built) | ~5 MB |
| SRTM terrain (Ukraine) | ~300 MB |
| **Total (with CUDA)** | **~1.1 GB** |
| **Total (CPU only)** | **~415 MB** |
---
## Runtime Requirements
| Resource | Minimum | Recommended |
|----------|---------|-------------|
| RAM | 4 GB | 8+ GB |
| Disk | 500 MB | 2 GB (with terrain cache) |
| CPU | 4 cores | 8+ cores |
| GPU | - | NVIDIA GTX 1060+ / Intel UHD 630+ |
| OS | Windows 10 | Windows 10/11 64-bit |
| Python | 3.10 | 3.11+ |
| Node.js | 18 | 20 LTS |
---
## Auto-Update Mechanism (Future)
```python
# Check for updates on startup
async def check_for_updates():
try:
response = await httpx.get(
"https://api.github.com/repos/user/rfcp/releases/latest",
timeout=5
)
latest = response.json()["tag_name"]
current = get_current_version()
if latest != current:
return {
"update_available": True,
"current": current,
"latest": latest,
"download_url": response.json()["assets"][0]["browser_download_url"]
}
    except Exception:
        # Best-effort: an update check must never block startup.
        pass
return {"update_available": False}
```

View File

@@ -0,0 +1,516 @@
# RFCP — Iteration 3.10.5: WebGL Smooth Coverage Interpolation
**Date:** February 6, 2026
**Priority:** P1 (Major Visual Improvement)
**Estimated Time:** 3-4 hours
**Author:** Claude (Opus 4.5) for Олег @ UMTC
---
## Overview
Replace the current grid-based square coverage visualization with smooth WebGL-interpolated rendering. Currently coverage is displayed as discrete colored squares which looks "pixelated" and unrealistic. Professional RF tools like CloudRF use smooth gradients that interpolate between measurement points.
**Current State:** Grid squares at 50m/200m resolution → blocky appearance
**Target State:** Smooth bilinear/bicubic interpolation → professional gradient appearance
---
## Problem Description
### Current Implementation
- Coverage points are rendered as discrete squares on a Leaflet canvas layer
- Each grid point (lat, lon, rsrp) → one colored square
- Resolution determines square size (50m = small squares, 200m = large squares)
- Result: Looks like Minecraft, not like professional RF planning software
### Desired Outcome
- Smooth color transitions between coverage points
- GPU-accelerated rendering via WebGL
- No visible grid artifacts
- Performance maintained or improved (GPU does interpolation)
- Same data, better visualization
---
## Technical Approach
### Option A: WebGL Fragment Shader (RECOMMENDED)
Use a WebGL fragment shader that:
1. Receives coverage points as a texture or uniform array
2. For each screen pixel, finds nearest coverage points
3. Performs bilinear interpolation between them
4. Outputs smoothly interpolated color
**Pros:**
- Best visual quality
- GPU-accelerated (fast)
- Scales to any resolution
- Industry standard approach
**Cons:**
- More complex implementation
- Requires WebGL knowledge
### Option B: Canvas with Gaussian Blur
Apply Gaussian blur to the existing canvas after rendering squares.
**Pros:**
- Simple to implement
- Works with existing code
**Cons:**
- Blurs edges (coverage boundary becomes fuzzy)
- Not true interpolation
- Performance overhead
### Option C: Pre-interpolate on CPU
Generate more points by interpolating between existing ones before rendering.
**Pros:**
- Simpler rendering
- Works with existing canvas
**Cons:**
- Much slower (CPU-bound)
- Memory intensive
- Not scalable
**DECISION: Implement Option A (WebGL Fragment Shader)**
---
## Implementation Plan
### Phase 1: WebGL Layer Setup
**File:** `frontend/src/components/map/WebGLCoverageLayer.tsx`
Create a new Leaflet layer that uses WebGL for rendering:
```typescript
import { useEffect, useRef } from 'react';
import { useMap } from 'react-leaflet';
import L from 'leaflet';
interface CoveragePoint {
lat: number;
lon: number;
rsrp: number;
}
interface WebGLCoverageLayerProps {
points: CoveragePoint[];
opacity: number;
minRsrp: number;
maxRsrp: number;
visible: boolean;
}
export default function WebGLCoverageLayer({
points,
opacity,
minRsrp,
maxRsrp,
visible
}: WebGLCoverageLayerProps) {
const map = useMap();
const canvasRef = useRef<HTMLCanvasElement | null>(null);
const glRef = useRef<WebGLRenderingContext | null>(null);
const programRef = useRef<WebGLProgram | null>(null);
useEffect(() => {
if (!visible || points.length === 0) return;
// Create canvas overlay
const canvas = document.createElement('canvas');
const container = map.getContainer();
canvas.width = container.clientWidth;
canvas.height = container.clientHeight;
canvas.style.position = 'absolute';
canvas.style.top = '0';
canvas.style.left = '0';
canvas.style.pointerEvents = 'none';
canvas.style.zIndex = '400'; // Above tiles, below markers
canvas.style.opacity = String(opacity);
container.appendChild(canvas);
canvasRef.current = canvas;
// Initialize WebGL
const gl = canvas.getContext('webgl') || canvas.getContext('experimental-webgl');
if (!gl) {
console.error('WebGL not supported, falling back to canvas');
return;
}
glRef.current = gl as WebGLRenderingContext;
// Setup shaders and render
initShaders(gl as WebGLRenderingContext);
render();
// Handle map move/zoom
const onMove = () => render();
map.on('move', onMove);
map.on('zoom', onMove);
map.on('resize', onResize);
return () => {
map.off('move', onMove);
map.off('zoom', onMove);
map.off('resize', onResize);
canvas.remove();
};
}, [points, visible, opacity, minRsrp, maxRsrp, map]);
// ... shader init and render functions
}
```
### Phase 2: WebGL Shaders
**Vertex Shader:**
```glsl
attribute vec2 a_position;
varying vec2 v_texCoord;
void main() {
gl_Position = vec4(a_position, 0.0, 1.0);
v_texCoord = (a_position + 1.0) / 2.0;
}
```
**Fragment Shader (Bilinear Interpolation):**
```glsl
precision mediump float;
uniform sampler2D u_coverageTexture;
uniform vec2 u_resolution;
uniform vec4 u_bounds; // minLat, minLon, maxLat, maxLon
uniform float u_minRsrp;
uniform float u_maxRsrp;
varying vec2 v_texCoord;
// RSRP to color gradient (matches existing palette)
vec3 rsrpToColor(float rsrp) {
float t = clamp((rsrp - u_minRsrp) / (u_maxRsrp - u_minRsrp), 0.0, 1.0);
// Color stops: red -> orange -> yellow -> green -> cyan -> blue
// Reversed: strong signal = green/cyan, weak = red/orange
if (t < 0.2) {
return mix(vec3(0.5, 0.0, 0.0), vec3(1.0, 0.0, 0.0), t / 0.2); // maroon -> red
} else if (t < 0.4) {
return mix(vec3(1.0, 0.0, 0.0), vec3(1.0, 0.5, 0.0), (t - 0.2) / 0.2); // red -> orange
} else if (t < 0.6) {
return mix(vec3(1.0, 0.5, 0.0), vec3(1.0, 1.0, 0.0), (t - 0.4) / 0.2); // orange -> yellow
} else if (t < 0.8) {
return mix(vec3(1.0, 1.0, 0.0), vec3(0.0, 1.0, 0.0), (t - 0.6) / 0.2); // yellow -> green
} else {
return mix(vec3(0.0, 1.0, 0.0), vec3(0.0, 1.0, 1.0), (t - 0.8) / 0.2); // green -> cyan
}
}
void main() {
// Convert screen coords to geographic coords
vec2 geoCoord = mix(u_bounds.xy, u_bounds.zw, v_texCoord);
// Sample coverage texture (contains RSRP values encoded as colors)
vec4 sample = texture2D(u_coverageTexture, v_texCoord);
// Decode RSRP from texture (R channel = normalized RSRP)
float rsrp = mix(u_minRsrp, u_maxRsrp, sample.r);
// Skip if no coverage (alpha = 0)
if (sample.a < 0.1) {
discard;
}
vec3 color = rsrpToColor(rsrp);
gl_FragColor = vec4(color, sample.a);
}
```
### Phase 3: Coverage Data → Texture
Convert coverage points array to a WebGL texture for GPU sampling:
```typescript
function createCoverageTexture(
  gl: WebGLRenderingContext,
  points: CoveragePoint[],
  bounds: L.LatLngBounds,
  minRsrp: number,
  maxRsrp: number,
  textureSize: number = 512
): WebGLTexture {
// Create a grid texture from sparse points
const data = new Uint8Array(textureSize * textureSize * 4);
const minLat = bounds.getSouth();
const maxLat = bounds.getNorth();
const minLon = bounds.getWest();
const maxLon = bounds.getEast();
// For each texture pixel, find nearest coverage point and interpolate
for (let y = 0; y < textureSize; y++) {
for (let x = 0; x < textureSize; x++) {
const lat = minLat + (maxLat - minLat) * (y / textureSize);
const lon = minLon + (maxLon - minLon) * (x / textureSize);
// Find nearest points and interpolate (IDW - Inverse Distance Weighting)
const { value, weight } = interpolateIDW(points, lat, lon, 4);
const idx = (y * textureSize + x) * 4;
if (weight > 0) {
// Encode normalized RSRP in R channel, weight in A channel
const normalized = (value - minRsrp) / (maxRsrp - minRsrp);
data[idx] = Math.floor(normalized * 255); // R = RSRP
data[idx + 1] = 0; // G = unused
data[idx + 2] = 0; // B = unused
data[idx + 3] = Math.floor(Math.min(weight, 1) * 255); // A = coverage mask
} else {
data[idx + 3] = 0; // No coverage
}
}
}
const texture = gl.createTexture();
gl.bindTexture(gl.TEXTURE_2D, texture);
gl.texImage2D(gl.TEXTURE_2D, 0, gl.RGBA, textureSize, textureSize, 0, gl.RGBA, gl.UNSIGNED_BYTE, data);
// Enable bilinear filtering for smooth interpolation
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.LINEAR);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MAG_FILTER, gl.LINEAR);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_S, gl.CLAMP_TO_EDGE);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_T, gl.CLAMP_TO_EDGE);
return texture!;
}
// Inverse Distance Weighting interpolation
function interpolateIDW(
points: CoveragePoint[],
lat: number,
lon: number,
k: number = 4,
power: number = 2
): { value: number; weight: number } {
// Find k nearest points
const distances = points.map((p, i) => ({
index: i,
dist: Math.sqrt(Math.pow(p.lat - lat, 2) + Math.pow(p.lon - lon, 2))
}));
distances.sort((a, b) => a.dist - b.dist);
const nearest = distances.slice(0, k);
// If very close to a point, use its value directly
if (nearest[0].dist < 0.0001) {
return { value: points[nearest[0].index].rsrp, weight: 1 };
}
// IDW formula: weighted average where weight = 1 / distance^power
let sumWeights = 0;
let sumValues = 0;
for (const n of nearest) {
const w = 1 / Math.pow(n.dist, power);
sumWeights += w;
sumValues += w * points[n.index].rsrp;
}
// Limit interpolation range (don't extrapolate too far from data)
const maxDist = nearest[nearest.length - 1].dist;
const coverage = maxDist < 0.01 ? 1 : Math.max(0, 1 - maxDist * 50);
return {
value: sumValues / sumWeights,
weight: coverage
};
}
```
### Phase 4: Integration with Existing Code
**Modify:** `frontend/src/components/map/MapView.tsx`
Add toggle between old canvas layer and new WebGL layer:
```typescript
import WebGLCoverageLayer from './WebGLCoverageLayer';
// In MapView component:
const [useWebGL, setUseWebGL] = useState(true);
// In render:
{useWebGL ? (
<WebGLCoverageLayer
points={coveragePoints}
opacity={heatmapOpacity}
minRsrp={-130}
maxRsrp={-50}
visible={showCoverage}
/>
) : (
<GeographicHeatmap ... /> // Existing canvas implementation
)}
```
**Add setting:** `frontend/src/components/panels/SettingsPanel.tsx`
```typescript
<div className="flex items-center justify-between">
<span>Smooth Coverage (WebGL)</span>
<Toggle
checked={useWebGL}
onChange={setUseWebGL}
/>
</div>
```
### Phase 5: Performance Optimizations
1. **Texture Caching:** Only regenerate texture when coverage data changes
2. **Resolution Scaling:** Use smaller texture on zoom out, larger on zoom in
3. **Frustum Culling:** Don't render points outside visible bounds
4. **Web Worker:** Move IDW interpolation to background thread
```typescript
// Memoize texture generation
const coverageTexture = useMemo(() => {
if (!gl || points.length === 0) return null;
return createCoverageTexture(gl, points, bounds, textureSize);
}, [points, bounds, textureSize]);
// Dynamic texture size based on zoom
const textureSize = useMemo(() => {
const zoom = map.getZoom();
if (zoom < 10) return 256;
if (zoom < 14) return 512;
return 1024;
}, [map.getZoom()]);
```
---
## Files to Create/Modify
| File | Action | Description |
|------|--------|-------------|
| `frontend/src/components/map/WebGLCoverageLayer.tsx` | CREATE | New WebGL rendering component |
| `frontend/src/components/map/shaders/coverage.vert` | CREATE | Vertex shader (optional, can inline) |
| `frontend/src/components/map/shaders/coverage.frag` | CREATE | Fragment shader (optional, can inline) |
| `frontend/src/components/map/MapView.tsx` | MODIFY | Add WebGL layer toggle |
| `frontend/src/store/settings.ts` | MODIFY | Add useWebGL setting |
| `frontend/src/components/panels/CoverageSettingsPanel.tsx` | MODIFY | Add WebGL toggle UI |
---
## Testing Checklist
### Visual Quality
- [ ] No visible grid squares at any zoom level
- [ ] Smooth color gradients between coverage points
- [ ] Coverage boundary is smooth, not jagged
- [ ] Colors match existing palette (weak = red, strong = cyan/green)
- [ ] Opacity control works correctly
### Performance
- [ ] 60 FPS during map pan/zoom
- [ ] Initial render < 500ms for 6000 points
- [ ] Memory usage reasonable (< 100MB for large coverage)
- [ ] No GPU memory leaks on repeated calculations
### Compatibility
- [ ] Works on systems without dedicated GPU (falls back gracefully)
- [ ] Works in Chrome, Firefox, Edge
- [ ] Works on both high-DPI and standard displays
### Integration
- [ ] Toggle between WebGL and canvas modes works
- [ ] Coverage data updates correctly after recalculation
- [ ] Settings persist across sessions
- [ ] No console errors or warnings
---
## Fallback Strategy
If WebGL fails to initialize:
1. Log warning to console
2. Fall back to existing canvas implementation
3. Show toast notification to user
```typescript
const gl = canvas.getContext('webgl');
if (!gl) {
console.warn('WebGL not available, using canvas fallback');
setUseWebGL(false);
toast.warning('WebGL not supported, using standard rendering');
return;
}
```
---
## Success Criteria
1. **Visual:** Coverage looks like CloudRF/professional tools — smooth gradients, no grid
2. **Performance:** Same or better than current canvas implementation
3. **Reliability:** Graceful fallback if WebGL unavailable
4. **UX:** User can toggle between modes in settings
---
## Additional Notes
### Color Gradient Reference
Current RSRP color mapping (from `colorGradient.ts`):
```
-130 dBm → Maroon (no service)
-110 dBm → Red (very weak)
-100 dBm → Orange (weak)
-85 dBm → Yellow (fair)
-70 dBm → Green (good)
-50 dBm → Cyan (excellent)
```
### Coordinate Systems
- **Geographic:** Latitude/Longitude (EPSG:4326)
- **Screen:** Pixels from top-left
- **WebGL:** Normalized device coordinates (-1 to 1)
- **Texture:** UV coordinates (0 to 1)
All conversions must account for Web Mercator projection distortion.
---
## References
- WebGL Fundamentals: https://webglfundamentals.org/
- Leaflet Custom Layers: https://leafletjs.com/examples/extending/extending-2-layers.html
- IDW Interpolation: https://en.wikipedia.org/wiki/Inverse_distance_weighting
- CloudRF visualization: https://cloudrf.com (for visual reference)
---
## Commit Message
```
feat(coverage): WebGL smooth interpolation rendering
- Add WebGLCoverageLayer with GPU-accelerated rendering
- Implement IDW interpolation for smooth gradients
- Add toggle between WebGL and canvas modes
- Graceful fallback for systems without WebGL support
Closes #coverage-interpolation
```
---
**Ready for Implementation!**

View File

@@ -0,0 +1,557 @@
# RFCP Iteration 3.5.1 — Bugfixes & Polish
## Overview
Focused bugfix and polish release addressing UI issues, coverage boundary accuracy, history improvements, and GPU indicator fixes discovered during 3.5.0 testing.
---
## 1. GPU — Detection Not Working + UI Overlap
### 1A. GPU Not Detected Despite Being Available
**Problem:** User has a laptop with DUAL GPUs (Intel integrated + NVIDIA discrete) but the app only shows "CPU (NumPy)". GPU acceleration is not working at all — no GPU option available in the device selector.
**Root cause investigation needed:**
1. Check if CuPy is actually installed in the Python environment
2. Check if CUDA toolkit is accessible from the app's runtime
3. Check if PyOpenCL is installed (fallback for Intel GPU)
4. The backend GPU detection may be failing silently
**Debug steps to add:**
```python
# backend/app/services/gpu_backend.py — improve detection with logging
import logging
logger = logging.getLogger(__name__)
@classmethod
def detect_backends(cls) -> list:
backends = []
# Check NVIDIA CUDA
try:
import cupy as cp
count = cp.cuda.runtime.getDeviceCount()
logger.info(f"CUDA detected: {count} device(s)")
for i in range(count):
device = cp.cuda.Device(i)
backends.append({...})
except ImportError:
logger.warning("CuPy not installed — run: pip install cupy-cuda12x")
except Exception as e:
logger.warning(f"CUDA detection failed: {e}")
# Check OpenCL (works with Intel, AMD, AND NVIDIA)
try:
import pyopencl as cl
platforms = cl.get_platforms()
logger.info(f"OpenCL detected: {len(platforms)} platform(s)")
for platform in platforms:
for device in platform.get_devices():
logger.info(f" OpenCL device: {device.name}")
backends.append({...})
except ImportError:
logger.warning("PyOpenCL not installed — run: pip install pyopencl")
except Exception as e:
logger.warning(f"OpenCL detection failed: {e}")
# Always log what was found
logger.info(f"Total compute backends: {len(backends)} "
f"({sum(1 for b in backends if b['type'] == 'cuda')} CUDA, "
f"{sum(1 for b in backends if b['type'] == 'opencl')} OpenCL)")
# CPU always available
backends.append({...cpu...})
return backends
```
**Installation check endpoint:**
```python
# backend/app/api/routes/gpu.py — add diagnostic endpoint
@router.get("/diagnostics")
async def gpu_diagnostics():
"""Full GPU diagnostic info for troubleshooting."""
diag = {
"python_version": sys.version,
"platform": platform.platform(),
"cuda": {},
"opencl": {},
"numpy": {}
}
# Check CuPy/CUDA
try:
import cupy
diag["cuda"]["cupy_version"] = cupy.__version__
diag["cuda"]["cuda_version"] = cupy.cuda.runtime.runtimeGetVersion()
diag["cuda"]["device_count"] = cupy.cuda.runtime.getDeviceCount()
for i in range(diag["cuda"]["device_count"]):
d = cupy.cuda.Device(i)
diag["cuda"][f"device_{i}"] = {
"name": d.name,
"compute_capability": d.compute_capability,
"total_memory_mb": d.mem_info[1] // 1024 // 1024
}
except ImportError:
diag["cuda"]["error"] = "CuPy not installed"
diag["cuda"]["install_hint"] = "pip install cupy-cuda12x --break-system-packages"
except Exception as e:
diag["cuda"]["error"] = str(e)
# Check PyOpenCL
try:
import pyopencl as cl
diag["opencl"]["pyopencl_version"] = cl.VERSION_TEXT
for p in cl.get_platforms():
platform_info = {"name": p.name, "devices": []}
for d in p.get_devices():
platform_info["devices"].append({
"name": d.name,
"type": cl.device_type.to_string(d.type),
"memory_mb": d.global_mem_size // 1024 // 1024,
"compute_units": d.max_compute_units
})
diag["opencl"][p.name] = platform_info
except ImportError:
diag["opencl"]["error"] = "PyOpenCL not installed"
diag["opencl"]["install_hint"] = "pip install pyopencl"
except Exception as e:
diag["opencl"]["error"] = str(e)
# Check NumPy
import numpy as np
diag["numpy"]["version"] = np.__version__
return diag
```
**Frontend — show diagnostic info:**
```typescript
// In GPUIndicator.tsx — when only CPU detected, show help
{devices.length === 1 && devices[0].type === 'cpu' && (
<div className="text-xs text-yellow-400 mt-2 p-2 bg-yellow-900/20 rounded">
No GPU detected.
<button
onClick={() => fetchDiagnostics()}
className="underline ml-1"
>
Run diagnostics
</button>
</div>
)}
```
**Auto-install hint in UI:**
```
⚠ No GPU detected
For NVIDIA GPU: pip install cupy-cuda12x
For Intel/AMD GPU: pip install pyopencl
[Run Diagnostics] [Install Guide]
```
**Dual GPU handling (Intel + NVIDIA laptop):**
```python
# When both Intel (OpenCL) and NVIDIA (CUDA) found:
# - List both in device selector
# - Default to NVIDIA CUDA (faster)
# - Allow user to switch
# - Intel iGPU via OpenCL is still ~3-5x faster than CPU
# Example device list for dual GPU laptop:
# 1. ⚡ NVIDIA GeForce RTX 4060 (CUDA) — 8 GB [DEFAULT]
# 2. ⚡ Intel UHD Graphics 770 (OpenCL) — shared memory
# 3. 💻 CPU (16 cores)
```
### 1B. GPU Indicator UI — Fix Overlap with Fit Button
**Problem:** GPU device dropdown overlaps with the "Fit" button in top-right corner.
**Solution:**
- Keep compact "⚡ CPU" badge in header
- Dropdown opens to the LEFT or DOWNWARD, not overlapping map controls
- Proper z-index and positioning
- Shorter labels: "CPU" not "CPU (NumPy)"
**Files:**
- `frontend/src/components/ui/GPUIndicator.tsx`
- `backend/app/services/gpu_backend.py`
- `backend/app/api/routes/gpu.py`
---
## 2. Coverage Boundary — Improve Accuracy
**Problem:** Current boundary shows a rough circle/ellipse shape that doesn't follow actual coverage contour.
**Current behavior:** Boundary seems to be based on simple distance radius rather than actual RSRP threshold contour.
**Expected behavior:** Boundary should follow the actual -100 dBm (or configured threshold) contour line — an irregular shape that follows terrain, buildings, vegetation shadows.
**Solution:**
```python
# Backend approach: Generate contour from actual RSRP grid
import numpy as np
from scipy.ndimage import binary_dilation, binary_erosion
from shapely.geometry import MultiPoint
from shapely.ops import unary_union
def calculate_coverage_boundary(points: list, threshold_dbm: float = -100) -> list:
"""
Calculate coverage boundary as convex hull of points above threshold.
Returns list of [lat, lon] coordinates forming the boundary polygon.
"""
# Filter points above threshold
valid_points = [(p['lat'], p['lon']) for p in points if p['rsrp'] >= threshold_dbm]
if len(valid_points) < 3:
return []
# Create concave hull (alpha shape) for realistic boundary
# Concave hull follows the actual shape better than convex hull
from shapely.geometry import MultiPoint
multi_point = MultiPoint(valid_points)
# Alpha shape — adjust alpha for detail level
# Higher alpha = more detailed (but slower)
boundary = concave_hull(multi_point, ratio=0.3)
if boundary.is_empty:
return []
# Simplify to reduce points (tolerance in degrees ≈ 100m)
simplified = boundary.simplify(0.001)
# Return as coordinate list
coords = list(simplified.exterior.coords)
return [[lat, lon] for lat, lon in coords]
```
```python
# Alternative: Grid-based contour approach
def calculate_boundary_from_grid(
grid_points: list,
threshold_dbm: float,
grid_resolution_m: float
) -> list:
"""
Create boundary by finding edge cells of coverage area.
More accurate than hull — follows actual coverage gaps.
"""
import numpy as np
# Build 2D RSRP grid
lats = sorted(set(p['lat'] for p in grid_points))
lons = sorted(set(p['lon'] for p in grid_points))
grid = np.full((len(lats), len(lons)), np.nan)
lat_idx = {lat: i for i, lat in enumerate(lats)}
lon_idx = {lon: i for i, lon in enumerate(lons)}
for p in grid_points:
i = lat_idx[p['lat']]
j = lon_idx[p['lon']]
grid[i, j] = p['rsrp']
# Binary mask: above threshold
mask = grid >= threshold_dbm
# Find boundary: dilate - original = edge cells
dilated = binary_dilation(mask)
boundary_mask = dilated & ~mask
# Extract boundary coordinates
boundary_coords = []
for i in range(len(lats)):
for j in range(len(lons)):
if boundary_mask[i, j]:
boundary_coords.append([lats[i], lons[j]])
# Order points for polygon (traveling salesman approximate)
if len(boundary_coords) > 2:
ordered = order_boundary_points(boundary_coords)
return ordered
return boundary_coords
```
**Frontend changes:**
- Receive boundary polygon from backend (already calculated with results)
- Or calculate client-side from grid points
- Render as Leaflet polygon with dashed white stroke
- Should follow actual coverage shape, not circular approximation
**Files:**
- `backend/app/services/coverage_service.py` — add boundary calculation
- `frontend/src/components/map/CoverageBoundary.tsx` — render real contour
---
## 3. Session History — Show Propagation Parameters
**Problem:** History entries only show preset, points, radius, resolution. Missing propagation settings used.
**Solution:** Save full propagation config snapshot with each history entry.
```typescript
// frontend/src/store/calcHistory.ts
interface HistoryEntry {
id: string;
timestamp: Date;
computationTime: number;
preset: string;
radius: number;
resolution: number;
totalPoints: number;
// Coverage results
coverage: {
excellent: number; // percentage
good: number;
fair: number;
weak: number;
};
avgRsrp: number;
rangeMin: number;
rangeMax: number;
// NEW: Propagation parameters snapshot
propagation: {
modelsUsed: string[]; // ["Free-Space", "terrain_los", ...]
modelCount: number; // 12
frequency: number; // 2100 MHz
txPower: number; // 46 dBm
antennaGain: number; // 15 dBi
antennaHeight: number; // 10 m
// Environment
season: string; // "Winter (30%)"
temperature: string; // "15°C (mild)"
humidity: string; // "50% (normal)"
rainConditions: string; // "Light Rain"
indoorCoverage: string; // "Medium Building (brick)"
// Margins
fadingMargin: number; // 0 dB
// Atmospheric
atmosphericAbsorption: boolean;
};
// Site config
sites: number; // 2
sectors: number; // total sectors
}
```
**Display in History panel:**
```typescript
// Expanded history entry shows propagation details
<div className="history-entry-expanded">
{/* Existing: time, points, coverage bars */}
{/* NEW: Propagation summary (collapsed by default) */}
<details className="mt-2">
<summary className="text-xs text-gray-400 cursor-pointer hover:text-gray-300">
Propagation: {entry.propagation.modelCount} models, {entry.propagation.frequency} MHz
</summary>
<div className="mt-1 pl-3 text-xs text-gray-500 space-y-0.5">
<div>TX: {entry.propagation.txPower} dBm, Gain: {entry.propagation.antennaGain} dBi</div>
<div>Height: {entry.propagation.antennaHeight}m</div>
<div>Environment: {entry.propagation.season}, {entry.propagation.rainConditions}</div>
<div>Indoor: {entry.propagation.indoorCoverage}</div>
{entry.propagation.fadingMargin > 0 && (
<div>Fading margin: {entry.propagation.fadingMargin} dB</div>
)}
<div className="flex flex-wrap gap-1 mt-1">
{entry.propagation.modelsUsed.map(model => (
<span key={model} className="px-1 py-0.5 bg-slate-700 rounded text-[10px]">
{model}
</span>
))}
</div>
</div>
</details>
</div>
```
**Files:**
- `frontend/src/store/calcHistory.ts` — extend HistoryEntry type, save propagation
- `frontend/src/components/panels/HistoryPanel.tsx` — show expandable propagation details
- `backend/app/api/websocket.py` — include propagation config in result message
- `backend/app/services/coverage_service.py` — return config snapshot with results
---
## 4. Results Popup — Show Propagation Summary
**Problem:** Calculation Complete popup shows time, points, coverage bars — but not which models were used.
**Solution:** Add compact propagation info to results popup.
```typescript
// frontend/src/components/ui/ResultsPopup.tsx
// Add below coverage bars:
<div className="mt-2 text-xs text-gray-400">
<span>{result.modelsUsed?.length || 0} models</span>
  <span className="mx-1">•</span>
<span>{result.frequency} MHz</span>
{result.fadingMargin > 0 && (
<>
      <span className="mx-1">•</span>
<span>FM: {result.fadingMargin} dB</span>
</>
)}
{result.indoorCoverage && result.indoorCoverage !== 'none' && (
<>
      <span className="mx-1">•</span>
<span>Indoor: {result.indoorCoverage}</span>
</>
)}
</div>
```
**Files:**
- `frontend/src/components/ui/ResultsPopup.tsx`
---
## 5. Batch Frequency Change (from 3.5.0 backlog)
**Problem:** To compare coverage at different frequencies, user must edit each sector manually.
**Solution:** Quick-change buttons in toolbar or Coverage Settings.
```typescript
// frontend/src/components/panels/BatchOperations.tsx (NEW)
const QUICK_BANDS = [
{ freq: 700, label: '700', band: 'B28', color: 'text-red-400' },
{ freq: 800, label: '800', band: 'B20', color: 'text-orange-400' },
{ freq: 900, label: '900', band: 'B8', color: 'text-yellow-400' },
{ freq: 1800, label: '1800', band: 'B3', color: 'text-green-400' },
{ freq: 2100, label: '2100', band: 'B1', color: 'text-blue-400' },
{ freq: 2600, label: '2600', band: 'B7', color: 'text-purple-400' },
{ freq: 3500, label: '3500', band: 'n78', color: 'text-pink-400' },
];
export function BatchFrequencyChange() {
return (
<div className="p-3 border-t border-slate-700">
<h4 className="text-xs font-semibold text-gray-400 mb-2">
SET ALL SECTORS
</h4>
<div className="flex flex-wrap gap-1">
{QUICK_BANDS.map(b => (
<button
key={b.freq}
onClick={() => setAllSectorsFrequency(b.freq)}
className="px-2 py-1 text-xs bg-slate-700 hover:bg-slate-600 rounded"
          title={`${b.band} • ${b.freq} MHz`}
>
<span className={b.color}>{b.label}</span>
</button>
))}
</div>
</div>
);
}
```
**Location:** Below site list, above Coverage Settings.
**Files:**
- `frontend/src/components/panels/BatchOperations.tsx` (NEW)
- `frontend/src/store/sites.ts` — add `setAllSectorsFrequency()` action
---
## 6. Minor UI Fixes
### 6.1 Terrain Profile — Click Propagation (verify fix)
- Verify that clicking "Terrain Profile" button no longer adds ruler point
- If still broken: ensure e.stopPropagation() AND e.preventDefault() on button
### 6.2 GPU Indicator — Shorter Label
- Current: "CPU (NumPy)" — too long
- Should be: "CPU" or "⚡ CPU"
- When GPU active: "⚡ RTX 4060" (short device name)
### 6.3 ~~Coordinate Display — Show Elevation~~ ✅ WORKS
- Elevation loads on hover with delay — NOT a bug
- Shows "Elev: 380m ASL" after holding cursor on map
- No fix needed
---
## Implementation Order
### Priority 1 — Quick Fixes (30 min)
- [ ] GPU indicator positioning (no overlap with Fit)
- [ ] GPU detection — install CuPy/PyOpenCL, diagnostics endpoint
- [ ] Terrain Profile click fix (verify)
### Priority 2 — History Enhancement (1 hour)
- [ ] Extend HistoryEntry with propagation params
- [ ] Save propagation snapshot on calculation complete
- [ ] Expandable propagation details in History panel
- [ ] Results popup — show model count + frequency
### Priority 3 — Coverage Boundary (1-2 hours)
- [ ] Implement contour-based boundary from actual RSRP grid
- [ ] Replace circular approximation with real coverage shape
- [ ] Test with multi-site calculations
- [ ] Smooth boundary line (simplify polygon)
### Priority 4 — Batch Frequency (30 min)
- [ ] BatchOperations component
- [ ] setAllSectorsFrequency store action
- [ ] Wire into sidebar panel
---
## Success Criteria
- [ ] GPU indicator does not overlap with any map controls
- [ ] Coverage boundary follows actual coverage shape (not circular)
- [ ] History entries show expandable propagation parameters
- [ ] Results popup shows model count and frequency
- [ ] Batch frequency change updates all sectors at once
- [ ] Terrain Profile button click doesn't add ruler point
- [ ] Elevation displays correctly in bottom-left
---
## Files Summary
### New Files
- `frontend/src/components/panels/BatchOperations.tsx`
### Modified Files
- `frontend/src/components/ui/GPUIndicator.tsx` — fix position/overlap
- `frontend/src/components/map/CoverageBoundary.tsx` — real contour
- `frontend/src/components/ui/ResultsPopup.tsx` — propagation info
- `frontend/src/store/calcHistory.ts` — extended HistoryEntry
- `frontend/src/components/panels/HistoryPanel.tsx` — expandable details
- `frontend/src/store/sites.ts` — batch frequency action
- `backend/app/services/coverage_service.py` — boundary calculation, config snapshot
- `backend/app/api/websocket.py` — include config in results
---
*"Polish makes the difference between a tool and a product"*

View File

@@ -0,0 +1,504 @@
# RFCP — Iteration 3.5.2: Native Backend + GPU Fix + UI Polish
## Overview
Fix critical architecture issues: GPU indicator dropdown broken, GPU acceleration not working
(CuPy in wrong Python environment), and prepare path to remove WSL2 dependency for end users.
Plus UI polish items carried over from 3.5.1.
**Priority:** GPU fixes first, then UI polish, then native Windows exploration.
---
## CRITICAL CONTEXT
### Current Architecture Problem
```
RFCP.exe (Electron, Windows)
└── launches backend via WSL2:
python3 -m uvicorn app.main:app --host 0.0.0.0 --port 8090
└── /usr/bin/python3 (WSL2 system Python 3.12)
└── NO venv, NO CuPy installed
User installed CuPy in Windows Python → backend doesn't see it.
User installed CuPy in WSL system Python → needs --break-system-packages
```
### GPU Hardware (Confirmed Working)
```
nvidia-smi output (from WSL2):
NVIDIA GeForce RTX 4060 Laptop GPU
Driver: 581.42 (Windows) / 580.95.02 (WSL2)
CUDA: 13.0
VRAM: 8188 MiB
GPU passthrough: WORKING ✅
```
### Files to Reference
```
backend/app/services/gpu_backend.py — GPUManager class
backend/app/api/routes/gpu.py — GPU API endpoints
frontend/src/components/ui/GPUIndicator.tsx — GPU badge/dropdown
desktop/ — Electron app source
installer/ — Build scripts
```
---
## Task 1: Fix GPU Indicator Dropdown Z-Index (Priority 1 — 10 min)
### Problem
GPU dropdown WORKS (opens on click, shows diagnostics, install hints) but renders
BEHIND the right sidebar panel. The sidebar (Sites, Coverage Settings) has higher
z-index than the GPU dropdown, so the dropdown is invisible/hidden underneath.
See screenshots: dropdown is partially visible only when sidebar is made very narrow.
It shows: "COMPUTE DEVICES", "CPU (NumPy)", install hints, "Run Diagnostics",
and even diagnostics JSON — all working but hidden behind sidebar.
### Root Cause
GPUIndicator dropdown z-index is lower than the right sidebar panel z-index.
### Solution
In `GPUIndicator.tsx` — find the dropdown container div and set z-index
higher than the sidebar:
```tsx
{isOpen && (
<div
className="absolute top-full mt-1 bg-dark-surface border border-dark-border
rounded-lg shadow-2xl p-3 min-w-[300px]"
style={{ zIndex: 9999 }} // MUST be above sidebar (which is ~z-50 or z-auto)
>
...
</div>
)}
```
**Key requirements:**
1. `z-index: 9999` (or at minimum higher than sidebar)
2. Position: dropdown should open to the LEFT (toward center of screen)
to avoid being cut off by right edge
3. `right-0` on the absolute positioning (anchored to right edge of badge)
**Alternative approach** — use Tailwind z-index:
```tsx
className="absolute top-full right-0 mt-1 z-[9999] ..."
```
**Also check:** The parent container of GPUIndicator might need `position: relative`
for absolute positioning to work correctly against the right sidebar.
### Testing
- [ ] Click "CPU" badge → dropdown appears ABOVE the sidebar
- [ ] Full dropdown visible: devices, install hints, diagnostics
- [ ] Dropdown doesn't get cut off on right side
- [ ] Click outside → dropdown closes
- [ ] Dropdown works at any window width
---
## Task 2: Install CuPy in WSL Backend (Priority 1 — 10 min)
### Problem
CuPy installed in Windows Python, but backend runs in WSL2 system Python.
### Solution
Add a startup check in the backend that detects missing GPU packages
and provides clear instructions. Also, the Electron app should try to
install dependencies on first launch.
**Step 1: Backend startup GPU check**
In `backend/app/main.py`, add on startup:
```python
@app.on_event("startup")
async def check_gpu_availability():
"""Log GPU status on startup for debugging."""
import logging
logger = logging.getLogger("rfcp.gpu")
# Check CuPy
try:
import cupy as cp
device_count = cp.cuda.runtime.getDeviceCount()
if device_count > 0:
name = cp.cuda.Device(0).name
mem = cp.cuda.Device(0).mem_info[1] // 1024 // 1024
logger.info(f"✅ GPU detected: {name} ({mem} MB VRAM)")
logger.info(f" CuPy {cp.__version__}, CUDA devices: {device_count}")
else:
logger.warning("⚠️ CuPy installed but no CUDA devices found")
except ImportError:
logger.warning("⚠️ CuPy not installed — GPU acceleration disabled")
logger.warning(" Install: pip install cupy-cuda12x --break-system-packages")
except Exception as e:
logger.warning(f"⚠️ CuPy error: {e}")
# Check PyOpenCL
try:
import pyopencl as cl
platforms = cl.get_platforms()
for p in platforms:
for d in p.get_devices():
logger.info(f"✅ OpenCL device: {d.name.strip()}")
except ImportError:
logger.info(" PyOpenCL not installed (optional)")
except Exception:
pass
```
**Step 2: GPU diagnostics endpoint enhancement**
Enhance `/api/gpu/diagnostics` to return install commands:
```python
@router.get("/diagnostics")
async def gpu_diagnostics():
import platform, sys
diagnostics = {
"python": sys.version,
"platform": platform.platform(),
"executable": sys.executable,
"is_wsl": "microsoft" in platform.release().lower(),
"cuda_available": False,
"opencl_available": False,
"install_hint": "",
"devices": []
}
# Check nvidia-smi
try:
import subprocess
result = subprocess.run(
["nvidia-smi", "--query-gpu=name,memory.total", "--format=csv,noheader"],
capture_output=True, text=True, timeout=5
)
if result.returncode == 0:
diagnostics["nvidia_smi"] = result.stdout.strip()
except:
diagnostics["nvidia_smi"] = "not found"
# Check CuPy
try:
import cupy
diagnostics["cupy_version"] = cupy.__version__
diagnostics["cuda_available"] = True
count = cupy.cuda.runtime.getDeviceCount()
for i in range(count):
d = cupy.cuda.Device(i)
diagnostics["devices"].append({
"id": i,
"name": d.name,
"memory_mb": d.mem_info[1] // 1024 // 1024,
"backend": "CUDA"
})
except ImportError:
if diagnostics.get("is_wsl"):
diagnostics["install_hint"] = "pip3 install cupy-cuda12x --break-system-packages"
else:
diagnostics["install_hint"] = "pip install cupy-cuda12x"
return diagnostics
```
**Step 3: Frontend shows diagnostics clearly**
In GPUIndicator dropdown, show:
```
⚠ No GPU detected
Your system: WSL2 + NVIDIA RTX 4060
To enable GPU acceleration:
┌─────────────────────────────────────────────┐
│ pip3 install cupy-cuda12x │
│ --break-system-packages │
└─────────────────────────────────────────────┘
Then restart RFCP.
[Copy Command] [Run Diagnostics]
```
### Testing
- [ ] Backend startup logs GPU status
- [ ] /api/gpu/diagnostics returns WSL detection + install hint
- [ ] Frontend shows clear install instructions
- [ ] After installing CuPy in WSL + restart → GPU appears in list
---
## Task 3: Terrain Profile Click Fix (Priority 2 — 5 min)
### Problem
Clicking "Terrain Profile" button in ruler measurement also adds a point on the map.
### Solution
In the Terrain Profile button handler:
```tsx
const handleTerrainProfile = (e: React.MouseEvent) => {
e.stopPropagation();
e.preventDefault();
// ... open terrain profile
};
```
Also check if the button is rendered inside a map click handler area —
may need `L.DomEvent.disableClickPropagation(container)` on the parent.
### Testing
- [ ] Click "Terrain Profile" → opens profile, NO new ruler point added
- [ ] Map click still works normally when not clicking the button
---
## Task 4: Coverage Boundary — Real Contour Shape (Priority 2 — 45 min)
### Problem
Current boundary is a rough circle/ellipse. Should follow actual coverage contour.
### Approaches
**Option A: Shapely Alpha Shape (recommended)**
```python
# backend/app/services/boundary_service.py
from shapely.geometry import MultiPoint
from shapely.ops import unary_union
import numpy as np
def calculate_coverage_boundary(points: list, threshold_dbm: float = -100) -> list:
"""Calculate concave hull of coverage area above threshold."""
# Filter points above threshold
valid = [(p['lon'], p['lat']) for p in points if p['rsrp'] >= threshold_dbm]
if len(valid) < 3:
return []
mp = MultiPoint(valid)
# Use convex hull first, then try concave
try:
# Shapely 2.0+ has concave_hull
from shapely import concave_hull
hull = concave_hull(mp, ratio=0.3)
except ImportError:
# Fallback to convex hull
hull = mp.convex_hull
# Simplify to reduce points (0.001 deg ≈ 100m)
simplified = hull.simplify(0.001, preserve_topology=True)
# Extract coordinates
if simplified.geom_type == 'Polygon':
coords = list(simplified.exterior.coords)
return [{'lat': c[1], 'lon': c[0]} for c in coords]
return []
```
**Option B: Grid-based contour (simpler)**
```python
def grid_contour_boundary(points: list, threshold_dbm: float, resolution: float):
"""Find boundary by detecting edge cells in grid."""
# Create binary grid: 1 = above threshold, 0 = below
# Find cells where 1 is adjacent to 0 = boundary
# Convert cell coords back to lat/lon
# Return ordered boundary points
```
### API Endpoint
```python
# Add to coverage calculation response
@router.post("/coverage/calculate")
async def calculate_coverage(...):
result = coverage_service.calculate(...)
# Calculate boundary
if result.points:
boundary = calculate_coverage_boundary(
result.points,
threshold_dbm=settings.min_signal
)
result.boundary = boundary
return result
```
### Frontend
```tsx
// CoverageBoundary.tsx — use returned boundary coords
// Instead of calculating alpha shape on frontend
const CoverageBoundary = ({ points, boundary }) => {
// If server returned boundary, use it
if (boundary && boundary.length > 0) {
return <Polygon positions={boundary.map(p => [p.lat, p.lon])} />;
}
// Fallback to current convex hull implementation
return <CurrentImplementation points={points} />;
};
```
### Dependencies
Need `shapely` installed:
```
pip install shapely # or pip3 install shapely --break-system-packages
```
Check if already in requirements.txt.
### Testing
- [ ] 5km calculation → boundary follows actual coverage shape
- [ ] 10km calculation → boundary is irregular (terrain-dependent)
- [ ] Toggle boundary on/off works
- [ ] Boundary doesn't crash with < 3 points
---
## Task 5: Results Popup Enhancement (Priority 3 — 15 min)
### Problem
Calculation complete toast/popup doesn't show which models were used.
### Solution
Enhance the toast message after calculation:
```tsx
// Current:
toast.success(`Calculated ${points} points in ${time}s`);
// Enhanced:
const modelCount = result.modelsUsed?.length ?? 0;
const freq = sites[0]?.frequency ?? 0;
const presetName = settings.preset ?? 'custom';
toast.success(
  `${points} pts • ${time}s • ${presetName} • ${freq} MHz • ${modelCount} models`,
{ duration: 5000 }
);
```
### Testing
- [ ] After calculation, toast shows: points, time, preset, frequency, model count
---
## Task 6: Native Windows Backend (Priority 3 — Research/Plan)
### Problem
Current setup REQUIRES WSL2. Users without WSL2 can't use RFCP at all.
### Current Flow
```
RFCP.exe (Electron)
→ detects WSL2
→ launches: wsl python3 -m uvicorn ...
→ backend runs in WSL2 Linux
```
### Target Flow
```
RFCP.exe (Electron)
→ Option A: embedded Python (Windows native)
→ Option B: detect system Python (Windows)
→ Option C: keep WSL2 but with fallback
```
### Research Tasks (don't implement yet, just investigate)
1. **Check how Electron currently launches backend:**
```bash
# Look at desktop/ directory
cat desktop/src/main.ts # or main.js
# Find where it spawns python/uvicorn
```
2. **Check if Windows Python works for backend:**
```powershell
# In Windows PowerShell:
cd D:\root\rfcp\backend
python -m uvicorn app.main:app --host 0.0.0.0 --port 8090
# Does it start? What errors?
```
3. **Evaluate embedded Python options:**
- python-embedded (official, ~30 MB)
- PyInstaller (bundle backend as .exe)
- cx_Freeze
- Nuitka (compile Python to C)
4. **Document findings** — create a brief report:
```
RFCP-Native-Backend-Research.md
- Current architecture (WSL2 dependency)
- Windows Python compatibility test results
- Recommended approach
- Migration steps
- Timeline estimate
```
### Goal
User downloads RFCP.exe → installs → clicks icon → everything works.
No WSL2. No manual pip install. GPU auto-detected.
---
## Implementation Order
### Priority 1 (30 min total)
1. **Task 1:** Fix GPU dropdown — make it clickable again
2. **Task 2:** GPU diagnostics + install instructions in UI
3. **Task 3:** Terrain Profile click propagation fix
### Priority 2 (1 hour)
4. **Task 4:** Coverage boundary real contour (shapely)
5. **Task 5:** Results popup enhancement
### Priority 3 (Research only)
6. **Task 6:** Investigate native Windows backend — report only, no implementation
---
## Build & Deploy
```bash
# After implementation:
cd /mnt/d/root/rfcp/frontend
npx tsc --noEmit # TypeScript check
npm run build # Production build
# Rebuild Electron:
cd /mnt/d/root/rfcp/installer
bash build-win.sh
# Test:
# Install new .exe and verify GPU indicator works
```
---
## Success Criteria
- [ ] GPU dropdown opens when clicking badge
- [ ] Dropdown shows device list or install instructions
- [ ] After `pip3 install cupy-cuda12x --break-system-packages` in WSL + restart → GPU visible
- [ ] Terrain Profile click doesn't add ruler points
- [ ] Coverage boundary follows actual signal contour
- [ ] Results toast shows model count and frequency
- [ ] Native Windows backend research document created

View File

@@ -0,0 +1,556 @@
# RFCP — Iteration 3.6.0: Production GPU Build
## Overview
Enable GPU acceleration in the production PyInstaller build. Currently production
runs CPU-only (NumPy) because CuPy is not included in rfcp-server.exe.
**Goal:** User with NVIDIA GPU installs RFCP → GPU detected automatically →
coverage calculations use CUDA acceleration. No manual pip install required.
**Context from diagnostics screenshot:**
```json
{
"python_executable": "C:\\Users\\Administrator\\AppData\\Local\\Programs\\RFCP\\resources\\backend\\rfcp-server.exe",
"platform": "Windows-10-10.0.26288-SP0",
"is_wsl": false,
"numpy": { "version": "1.26.4" },
"cuda": {
"error": "CuPy not installed",
"install_hint": "pip install cupy-cuda12x"
}
}
```
**Architecture:** Production uses PyInstaller-bundled rfcp-server.exe (self-contained).
CuPy not included → GPU not available for end users.
---
## Strategy: Two-Tier Build
Instead of one massive binary, produce two builds:
```
RFCP-Setup-{version}.exe (~150 MB) — CPU-only, works everywhere
RFCP-Setup-{version}-GPU.exe (~700 MB) — includes CuPy + CUDA runtime
```
**Why not dynamic loading?**
PyInstaller bundles everything at build time. CuPy can't be pip-installed
into a frozen exe at runtime. Options are:
1. **Bundle CuPy in PyInstaller** ← cleanest, what we'll do
2. Side-load CuPy DLLs (fragile, version-sensitive)
3. Hybrid: unfrozen Python + CuPy installed separately (defeats purpose of exe)
---
## Task 1: PyInstaller Spec with CuPy (Priority 1 — 30 min)
### File: `installer/rfcp-server-gpu.spec`
Create a separate .spec file that includes CuPy:
```python
# rfcp-server-gpu.spec — GPU-enabled build
import os
import sys
from PyInstaller.utils.hooks import collect_all, collect_dynamic_libs
backend_path = os.path.abspath(os.path.join(os.path.dirname(SPEC), '..', 'backend'))
# Collect CuPy and its CUDA dependencies
cupy_datas, cupy_binaries, cupy_hiddenimports = collect_all('cupy')
# Also collect cupy_backends
cupyb_datas, cupyb_binaries, cupyb_hiddenimports = collect_all('cupy_backends')
# CUDA runtime libraries that CuPy needs
cuda_binaries = collect_dynamic_libs('cupy')
a = Analysis(
[os.path.join(backend_path, 'run_server.py')],
pathex=[backend_path],
binaries=cupy_binaries + cupyb_binaries + cuda_binaries,
datas=[
(os.path.join(backend_path, 'data', 'terrain'), 'data/terrain'),
] + cupy_datas + cupyb_datas,
hiddenimports=[
# Existing imports from rfcp-server.spec
'uvicorn.logging',
'uvicorn.loops',
'uvicorn.loops.auto',
'uvicorn.protocols',
'uvicorn.protocols.http',
'uvicorn.protocols.http.auto',
'uvicorn.protocols.websockets',
'uvicorn.protocols.websockets.auto',
'uvicorn.lifespan',
'uvicorn.lifespan.on',
'motor',
'pymongo',
'numpy',
'scipy',
'shapely',
'shapely.geometry',
'shapely.ops',
# CuPy-specific
'cupy',
'cupy.cuda',
'cupy.cuda.runtime',
'cupy.cuda.driver',
'cupy.cuda.memory',
'cupy.cuda.stream',
'cupy._core',
'cupy._core.core',
'cupy._core._routines_math',
'cupy.fft',
'cupy.linalg',
'fastrlock',
] + cupy_hiddenimports + cupyb_hiddenimports,
hookspath=[],
hooksconfig={},
runtime_hooks=[],
excludes=[],
noarchive=False,
)
pyz = PYZ(a.pure)
exe = EXE(
pyz,
a.scripts,
a.binaries,
a.datas,
[],
name='rfcp-server',
debug=False,
bootloader_ignore_signals=False,
strip=False,
upx=False, # Don't compress CUDA libs — they need fast loading
console=True,
icon=os.path.join(os.path.dirname(SPEC), 'rfcp.ico'),
)
```
### Key Points:
- `collect_all('cupy')` grabs all CuPy submodules + CUDA DLLs
- `fastrlock` is a CuPy dependency (must be in hiddenimports)
- `upx=False` — don't compress CUDA binaries (breaks them)
- One-file mode (`a.binaries + a.datas` in EXE) for single exe
---
## Task 2: Build Script for GPU Variant (Priority 1 — 15 min)
### File: `installer/build-gpu.bat` (Windows)
```batch
@echo off
echo ========================================
echo RFCP GPU Build — rfcp-server-gpu.exe
echo ========================================
REM Ensure CuPy is installed in build environment
echo Checking CuPy installation...
python -c "import cupy; print(f'CuPy {cupy.__version__} with CUDA {cupy.cuda.runtime.runtimeGetVersion()}')"
if errorlevel 1 (
echo ERROR: CuPy not installed. Run: pip install cupy-cuda12x
exit /b 1
)
REM Build with GPU spec
echo Building rfcp-server with GPU support...
cd /d %~dp0\..\backend
pyinstaller ..\installer\rfcp-server-gpu.spec --clean --noconfirm
echo.
echo Build complete! Output: dist\rfcp-server.exe
echo Size:
dir dist\rfcp-server.exe
REM Optional: copy to Electron resources
if exist "..\desktop\resources" (
copy /y dist\rfcp-server.exe ..\desktop\resources\rfcp-server.exe
echo Copied to desktop\resources\
)
pause
```
### File: `installer/build-gpu.sh` (WSL/Linux)
```bash
#!/bin/bash
set -e
echo "========================================"
echo " RFCP GPU Build — rfcp-server (GPU)"
echo "========================================"
# Check CuPy
python3 -c "import cupy; print(f'CuPy {cupy.__version__}')" 2>/dev/null || {
echo "ERROR: CuPy not installed. Run: pip install cupy-cuda12x"
exit 1
}
cd "$(dirname "$0")/../backend"
pyinstaller ../installer/rfcp-server-gpu.spec --clean --noconfirm
echo ""
echo "Build complete!"
ls -lh dist/rfcp-server*
```
---
## Task 3: GPU Backend — Graceful CuPy Detection (Priority 1 — 15 min)
### File: `backend/app/services/gpu_backend.py`
The existing gpu_backend.py should already handle CuPy absence gracefully.
Verify and fix if needed:
```python
# gpu_backend.py — must work in BOTH CPU and GPU builds
import numpy as np
# Try importing CuPy — this is the key detection
_cupy_available = False
_gpu_device_name = None
_gpu_memory_mb = 0
try:
import cupy as cp
# Verify we can actually use it (not just import)
device = cp.cuda.Device(0)
_gpu_device_name = device.attributes.get('name', f'CUDA Device {device.id}')
# Try to get name via runtime
try:
props = cp.cuda.runtime.getDeviceProperties(0)
_gpu_device_name = props.get('name', _gpu_device_name)
if isinstance(_gpu_device_name, bytes):
_gpu_device_name = _gpu_device_name.decode('utf-8').strip('\x00')
except Exception:
pass
_gpu_memory_mb = device.mem_info[1] // (1024 * 1024)
_cupy_available = True
except ImportError:
cp = None # CuPy not installed (CPU build)
except Exception as e:
cp = None # CuPy installed but CUDA not available
print(f"[GPU] CuPy found but CUDA unavailable: {e}")
def is_gpu_available() -> bool:
return _cupy_available
def get_gpu_info() -> dict:
if _cupy_available:
return {
"available": True,
"backend": "CuPy (CUDA)",
"device": _gpu_device_name,
"memory_mb": _gpu_memory_mb,
}
return {
"available": False,
"backend": "NumPy (CPU)",
"device": "CPU",
"memory_mb": 0,
}
def get_array_module():
"""Return cupy if available, otherwise numpy."""
if _cupy_available:
return cp
return np
```
### Usage in coverage_service.py:
```python
from app.services.gpu_backend import get_array_module, is_gpu_available
xp = get_array_module() # cupy or numpy — same API
# All calculations use xp instead of np:
distances = xp.sqrt(dx**2 + dy**2)
path_loss = 20 * xp.log10(distances) + 20 * xp.log10(freq_mhz) - 27.55
# If using cupy, results need to come back to CPU for JSON serialization:
if is_gpu_available():
results = xp.asnumpy(path_loss)
else:
results = path_loss
```
---
## Task 4: GPU Status in Frontend Header (Priority 2 — 10 min)
### Update GPUIndicator.tsx
When GPU is detected, the badge should clearly show it:
```
CPU build: [⚙ CPU] (gray badge)
GPU detected: [⚡ RTX 4060] (green badge)
```
The existing GPUIndicator already does this. Just verify:
1. Badge color changes from gray → green when GPU available
2. Dropdown shows "Active: GPU (CUDA)" not just "CPU (NumPy)"
3. No install hints shown when CuPy IS available
---
## Task 5: Build Environment Setup (Priority 1 — Manual by Олег)
### Prerequisites for GPU build:
```powershell
# 1. Install CuPy in Windows Python (NOT WSL)
pip install cupy-cuda12x
# 2. Verify CuPy works
python -c "import cupy; print(cupy.cuda.runtime.runtimeGetVersion())"
# Should print: 12000 or similar
# 3. Install PyInstaller if not present
pip install pyinstaller
# 4. Verify fastrlock (CuPy dependency)
pip install fastrlock
```
### Build commands:
```powershell
# CPU-only build (existing)
cd D:\root\rfcp\backend
pyinstaller ..\installer\rfcp-server.spec --clean --noconfirm
# GPU build (new)
cd D:\root\rfcp\backend
pyinstaller ..\installer\rfcp-server-gpu.spec --clean --noconfirm
```
### Expected output sizes:
```
rfcp-server.exe (CPU): ~80 MB
rfcp-server.exe (GPU): ~600-800 MB (CuPy bundles CUDA runtime libs)
```
---
## Task 6: Electron — Detect Build Variant (Priority 2 — 10 min)
### File: `desktop/main.js` or `desktop/src/main.ts`
Add version detection so UI knows which build it's running:
```javascript
// After backend starts, check GPU status
async function checkBackendCapabilities() {
try {
const response = await fetch('http://127.0.0.1:8090/api/gpu/status');
const data = await response.json();
// Send to renderer
mainWindow.webContents.send('gpu-status', data);
if (data.available) {
console.log(`[RFCP] GPU: ${data.device} (${data.memory_mb} MB)`);
} else {
console.log('[RFCP] Running in CPU mode');
}
} catch (e) {
console.log('[RFCP] Backend not ready for GPU check');
}
}
```
---
## Task 7: About / Version Info (Priority 3 — 5 min)
### Add build info to `/api/health` response:
```python
@app.get("/api/health")
async def health():
gpu_info = get_gpu_info()
return {
"status": "ok",
"version": "3.6.0",
"build": "gpu" if gpu_info["available"] else "cpu",
"gpu": gpu_info,
"python": sys.version,
"platform": platform.platform(),
}
```
---
## Build & Test Procedure
### Step 1: Setup Build Environment
```powershell
# Windows PowerShell (NOT WSL)
cd D:\root\rfcp
# Verify Python environment
python --version # Should be 3.11.x
pip list | findstr cupy # Should show cupy-cuda12x
# If CuPy not installed:
pip install cupy-cuda12x fastrlock
```
### Step 2: Build GPU Variant
```powershell
cd D:\root\rfcp\backend
pyinstaller ..\installer\rfcp-server-gpu.spec --clean --noconfirm
```
### Step 3: Test Standalone
```powershell
# Run the built exe directly
.\dist\rfcp-server.exe
# In another terminal:
curl http://localhost:8090/api/health
curl http://localhost:8090/api/gpu/status
curl http://localhost:8090/api/gpu/diagnostics
```
### Step 4: Verify GPU Detection
Expected `/api/gpu/status` response:
```json
{
"available": true,
"backend": "CuPy (CUDA)",
"device": "NVIDIA GeForce RTX 4060 Laptop GPU",
"memory_mb": 8188
}
```
### Step 5: Run Coverage Calculation
- Place a site on map
- Calculate coverage (10km, 200m resolution)
- Check logs for: `[GPU] Using CUDA: RTX 4060 (8188 MB)`
- Compare performance: should be 5-10x faster than CPU
### Step 6: Full Electron Build
```powershell
# Copy GPU server to Electron resources
copy backend\dist\rfcp-server.exe desktop\resources\
# Build Electron installer
cd installer
.\build-win.sh # or equivalent Windows script
```
---
## Risk Assessment
### Size Concern
CuPy bundles CUDA runtime (~500MB). Total GPU installer ~700-800MB.
**Mitigation:** This is acceptable for a professional RF planning tool.
AutoCAD is 7GB. QGIS is 1.5GB. Atoll is 3GB+.
### CUDA Version Compatibility
CuPy-cuda12x requires CUDA 12.x compatible driver.
RTX 4060 with Driver 581.42 → CUDA 13.0 → backward compatible ✅
**Mitigation:** gpu_backend.py already falls back to NumPy gracefully.
### PyInstaller + CuPy Issues
Known issues:
- CuPy uses many .so/.dll files that PyInstaller might miss
- `collect_all('cupy')` should catch them, but test thoroughly
- If missing DLLs → add them manually to `binaries` list
**Mitigation:** Test the standalone exe on a clean machine (no Python installed).
### Antivirus False Positives
Larger exe = more AV suspicion. PyInstaller exes already trigger some AV.
**Mitigation:** Code-sign the exe (future task), submit to AV vendors for whitelisting.
---
## Success Criteria
- [ ] `rfcp-server-gpu.spec` created and builds successfully
- [ ] Built exe detects RTX 4060 on startup
- [ ] `/api/gpu/status` returns `"available": true`
- [ ] Coverage calculation uses CuPy (check logs)
- [ ] GPU badge shows "⚡ RTX 4060" (green) in header
- [ ] Fallback to NumPy works if CUDA unavailable
- [ ] CPU-only spec (`rfcp-server.spec`) still builds and works
- [ ] Build time < 10 minutes
- [ ] GPU exe size < 1 GB
---
## Commit Message
```
feat(build): add GPU-enabled PyInstaller build with CuPy + CUDA
- New rfcp-server-gpu.spec with CuPy/CUDA collection
- Build scripts: build-gpu.bat, build-gpu.sh
- Graceful GPU detection in gpu_backend.py
- Two-tier build: CPU (~80MB) and GPU (~700MB) variants
- Auto-detection: RTX 4060 → CuPy acceleration
- Fallback: no CUDA → NumPy (CPU mode)
Iteration 3.6.0 — Production GPU Build
```
---
## Files Summary
### New Files:
| File | Purpose |
|------|---------|
| `installer/rfcp-server-gpu.spec` | PyInstaller config with CuPy |
| `installer/build-gpu.bat` | Windows GPU build script |
| `installer/build-gpu.sh` | Linux/WSL GPU build script |
### Modified Files:
| File | Changes |
|------|---------|
| `backend/app/services/gpu_backend.py` | Verify graceful detection |
| `backend/app/main.py` | Health endpoint with build info |
| `desktop/main.js` or `main.ts` | GPU status check after backend start |
| `frontend/src/components/ui/GPUIndicator.tsx` | Verify badge shows GPU |
### No Changes Needed:
| File | Reason |
|------|--------|
| `installer/rfcp-server.spec` | CPU build stays as-is |
| `backend/app/services/coverage_service.py` | Already uses get_array_module() |
| `installer/build-win.sh` | Existing CPU build unchanged |
---
## Timeline
| Phase | Task | Time |
|-------|------|------|
| **P1** | Create rfcp-server-gpu.spec | 30 min |
| **P1** | Build scripts | 15 min |
| **P1** | Verify gpu_backend.py | 15 min |
| **P2** | Frontend badge verification | 10 min |
| **P2** | Electron GPU status | 10 min |
| **P3** | Health endpoint update | 5 min |
| **Test** | Build + test standalone | 20 min |
| **Test** | Full Electron build | 15 min |
| | **Total** | **~2 hours** |
**Claude Code estimated time: 10-15 min** (spec + scripts + backend changes)
**Manual testing by Олег: 30-45 min** (building + verifying)

View File

@@ -0,0 +1,220 @@
# RFCP Project Roadmap — Updated February 4, 2026
**Project:** RFCP (RF Coverage Planning) for UMTC
**Developer:** Олег + Claude
**Started:** January 30, 2026
**Current Version:** 3.8.0 (GPU Acceleration Complete)
---
## ✅ Completed Milestones
### Phase 1: Frontend (January 2026)
- ✅ React + TypeScript + Vite + Leaflet
- ✅ Multi-site RF coverage planning
- ✅ Multi-sector sites (Alpha/Beta/Gamma)
- ✅ Geographic-scale canvas heatmap
- ✅ Keyboard shortcuts + delete confirmation
- ✅ NumberInput components with sliders
- ✅ TypeScript strict mode, ESLint clean
- ✅ Production build: 536KB / 163KB gzipped
### Phase 2: Backend Architecture (February 1, 2026)
- ✅ Python FastAPI + NumPy + ProcessPoolExecutor
- ✅ 8 propagation models (FreeSpace, Okumura-Hata, COST-231, ITU-R P.1546, etc.)
- ✅ Modular geometry engine (haversine, intersection, reflection, diffraction, LOS)
- ✅ SharedMemoryManager for terrain data (zero-copy, 25 MB)
- ✅ Building filtering (351k → 27k bbox → 15k cap)
- ✅ Overpass API with retry + mirror failover
- ✅ WebSocket progress streaming
### Phase 3: Performance (February 2-3, 2026)
- ✅ LOD (Level of Detail) optimization
- ✅ Spatial indexing for buildings (R-tree)
- ✅ Dominant path simplification for distant points
- ✅ OOM fix + memory management
- ✅ CloudRF-style color gradient
- ✅ Results popup + session history
- ✅ Terrain profile viewer
### Phase 4: GPU Acceleration (February 3-4, 2026) ⭐
- ✅ CuPy + CUDA backend (RTX 4060)
- ✅ CUDA Toolkit 13.1 + cupy-cuda13x setup
- ✅ Phase 2.5: Vectorized distances + path_loss (0.006s)
- ✅ Phase 2.6: Vectorized terrain LOS + diffraction (0.04s)
- ✅ Phase 2.7: Vectorized antenna pattern loss
- ✅ Vegetation bbox pre-filter (100x+ speedup)
- ✅ Worker process isolation (no CUDA in workers)
- ✅ PyInstaller ONEDIR GPU build (1.2 GB installer)
- ✅ **Full preset: 195s → 11.2s (17.4x speedup)**
### Supporting Work
- ✅ RF Radio Theory wiki article (comprehensive)
- ✅ Propagation model research (CloudRF, SPLAT!, Signal Server)
- ✅ RFCP Method collaboration framework documented
---
## 📊 Current Performance
| Preset | Points | Resolution | Time (cached) | Time (cold) |
|--------|--------|-----------|---------------|-------------|
| Standard | 1,975 | 200m | **2.3s** | ~12s |
| Full | 6,640 | 50m | **11.2s** | ~20s |
| 50km radius | 4,966 | adaptive | ~410s | ~420s |
**Hardware:** Windows 11, RTX 4060 Laptop GPU, 6-core CPU
---
## 🔜 Next: Phase 5 — Data & Accuracy
### 5.1 SRTM Terrain Integration
**Priority:** HIGH
**Status:** Not started
Current terrain: Single HGT tile download per calculation
Target: Pre-cached SRTM/ASTER DEM tiles with proper interpolation
- [ ] SRTM tile manager (auto-download, cache)
- [ ] Bilinear interpolation for elevation sampling
- [ ] Multi-tile coverage for large radius
- [ ] Terrain profile accuracy validation
- [ ] Compare with current terrain data quality
### 5.2 Project Persistence
**Priority:** MEDIUM
- [ ] Save/load projects (JSON or SQLite)
- [ ] Site configurations persistence
- [ ] Coverage results caching
- [ ] Session history persistence across restarts
- [ ] Export coverage report (PDF/PNG)
### 5.3 Accuracy Validation
**Priority:** MEDIUM
- [ ] Compare with known coverage maps
- [ ] Field measurements with real equipment
- [ ] Calibrate propagation models per environment
- [ ] Antenna pattern library (real equipment specs)
---
## 🔮 Future Phases
### Phase 6: Multi-Station & Dashboard
- [ ] Multi-station view (aggregate coverage)
- [ ] Station discovery via WireGuard mesh
- [ ] Coverage gap analysis
- [ ] Interference modeling between stations
- [ ] Handover zone visualization
### Phase 7: Hardware Integration
- [ ] LimeSDR Mini 2.0 testing
- [ ] Real RF attach validation
- [ ] sysmoISIM-SJA2 SIM integration
- [ ] ZTE B8200 base station testing
- [ ] INFOZAHYST Plastun SDR (if accessible)
### Phase 8: Advanced Features
- [ ] 3D visualization mode
- [ ] Link budget analysis view
- [ ] Frequency planning tool
- [ ] Indoor coverage modeling
- [ ] Time-series analysis (seasonal vegetation)
- [ ] Offline mode (embedded terrain DB)
### Phase 9: Distribution
- [ ] Auto-updater (electron-updater)
- [ ] Live USB distribution for field deployment
- [ ] Standalone offline package
- [ ] User documentation / help system
---
## 🏛️ Architecture Overview
```
RFCP Application (Electron)
├── Frontend (React + TypeScript + Vite)
│ ├── Leaflet map with custom canvas heatmap
│ ├── Zustand state management
│ └── WebSocket for progress streaming
├── Backend (Python FastAPI)
│ ├── Coverage Engine
│ │ ├── Grid generator (adaptive zones)
│ │ ├── GPU pipeline (CuPy/CUDA) — main process
│ │ │ ├── Phase 2.5: distances + path_loss
│ │ │ ├── Phase 2.6: terrain LOS + diffraction
│ │ │ └── Phase 2.7: antenna pattern
│ │ └── CPU workers (ProcessPool) — 3-6 workers
│ │ ├── Building obstruction (spatial index)
│ │ ├── Reflections (ray-building intersection)
│ │ └── Vegetation loss (bbox pre-filter)
│ │
│ ├── Propagation Models (8 models)
│ │ ├── Free-Space Path Loss
│ │ ├── Okumura-Hata (150-1500 MHz)
│ │ ├── COST-231-Hata (1500-2000 MHz)
│ │ ├── ITU-R P.1546
│ │ └── ... 4 more
│ │
│ ├── OSM Services
│ │ ├── Buildings (Overpass API + cache)
│ │ ├── Vegetation (bbox pre-filter)
│ │ ├── Water bodies
│ │ └── Streets
│ │
│ └── Terrain Service
│ ├── HGT tile download + cache
│ ├── Elevation sampling
│ └── Line-of-sight checking
└── Desktop (Electron)
├── Backend process management
└── NSIS installer (1.2 GB with CUDA)
```
---
## 📈 Development Timeline
```
Jan 30, 2026 Phase 1: Frontend complete (10 iterations)
Feb 01, 2026 Phase 2: Backend architecture (48 files, 82 tests)
Feb 02, 2026 Phase 3: LOD + performance optimization
Feb 03, 2026 Phase 3.5-3.6: GPU setup + CUDA build
Feb 04, 2026 Phase 3.7-3.8: GPU vectorization complete ⭐
─────────────────────────────────────────
Full preset: 195s → 11.2s (17.4x speedup)
Standard: 38s → 2.3s (16.5x speedup)
```
**Total development time:** ~5 days intensive
**Total iterations:** 3.8.0 (20+ sub-iterations)
**Architecture:** Battle-tested, production-ready
---
## 🧰 Tech Stack
| Component | Technology | Version |
|-----------|-----------|---------|
| Frontend | React + TypeScript | 18 |
| Build | Vite | 5.x |
| Map | Leaflet | 1.9 |
| State | Zustand | 4.x |
| Backend | Python FastAPI | 3.12 |
| GPU | CuPy + CUDA | 13.x |
| Parallel | ProcessPoolExecutor | stdlib |
| Terrain | NumPy (HGT tiles) | 1.26 |
| Desktop | Electron | 28.x |
| Installer | NSIS (via electron-builder) | - |
| Build (BE) | PyInstaller | 6.x |
---
*"11.2 seconds. Full preset. 6,640 points. GPU acceleration complete."*
*— February 4, 2026*

View File

@@ -0,0 +1,345 @@
# RFCP: WebGL Radial Gradients Coverage Layer
## Мета
Переробити WebGL coverage layer з texture-based підходу на **radial gradients** — як працює Canvas GeographicHeatmap, але на GPU.
## Чому radial gradients краще для візуалізації
**Texture-based (поточний):**
- Кожна точка = 1 pixel в grid
- Nearest neighbor fill → blocky квадрати
- Навіть з smoothstep — видно grid структуру
- ✅ Добре для: terrain detail, точні значення
- ❌ Погано для: красива візуалізація
**Radial gradients (Canvas heatmap):**
- Кожна точка = круг з radial falloff
- Smooth blending між точками
- Природній вигляд coverage
- ✅ Добре для: красива візуалізація, презентації
- ❌ Погано для: точні значення (blending спотворює)
## Архітектура WebGL Radial Gradients
### Підхід: Multi-pass additive blending
```
Pass 1-N: Для кожної точки (або batch точок)
├── Малюємо full-screen quad
├── Fragment shader: radial falloff від центру точки
├── Output: (weight * value, weight, 0, 1)
└── Blending: GL_ONE, GL_ONE (additive)
Final Pass:
├── Читаємо accumulated texture
├── Normalize: value = R / G (weighted average)
└── Apply colormap
```
### Альтернатива: Single-pass з texture atlas
Замість N проходів, закодувати всі точки в texture і в одному fragment shader пройтись по всіх:
```glsl
// Fragment shader
uniform sampler2D u_points; // texture з точками: (lat, lon, rsrp, radius)
uniform int u_pointCount;
void main() {
vec2 worldPos = getWorldPosition(v_uv);
float totalWeight = 0.0;
float totalValue = 0.0;
for (int i = 0; i < MAX_POINTS; i++) {
if (i >= u_pointCount) break;
vec4 point = texelFetch(u_points, ivec2(i, 0), 0);
vec2 pointPos = point.xy;
float rsrp = point.z;
float radius = point.w;
float dist = distance(worldPos, pointPos);
float weight = 1.0 - smoothstep(0.0, radius, dist); // note: smoothstep(edge0, edge1, x) is undefined when edge0 >= edge1
totalWeight += weight;
totalValue += weight * rsrp;
}
if (totalWeight < 0.001) discard;
float avgRsrp = totalValue / totalWeight;
vec3 color = rsrpToColor(avgRsrp);
gl_FragColor = vec4(color, smoothstep(0.0, 0.1, totalWeight));
}
```
**Проблема:** Loop по 6,675 точках в кожному fragment = дуже повільно.
### Рекомендований підхід: Batched additive blending
```
1. Створити offscreen framebuffer (float texture)
2. Для кожної точки (або batch по 100-500):
- Малювати quad розміром з radius точки
- Additive blend: (weight * rsrp, weight)
3. Final pass: normalize + colormap
```
Це як Mapbox heatmap працює.
---
## Імплементація
### Крок 1: Створити offscreen framebuffer
```typescript
// Accumulation texture (RG float for weighted sum)
const accumTexture = gl.createTexture();
gl.bindTexture(gl.TEXTURE_2D, accumTexture);
gl.texImage2D(gl.TEXTURE_2D, 0, gl.RG32F, width, height, 0, gl.RG, gl.FLOAT, null);
const framebuffer = gl.createFramebuffer();
gl.bindFramebuffer(gl.FRAMEBUFFER, framebuffer);
gl.framebufferTexture2D(gl.FRAMEBUFFER, gl.COLOR_ATTACHMENT0, gl.TEXTURE_2D, accumTexture, 0);
```
**Примітка:** Потрібен `EXT_color_buffer_float` extension для float framebuffer.
### Крок 2: Point rendering shader
**Vertex shader:**
```glsl
attribute vec2 a_position; // quad vertices
attribute vec2 a_pointCenter; // point lat/lon (instanced)
attribute float a_pointRsrp; // point RSRP (instanced)
attribute float a_pointRadius; // point radius in pixels (instanced)
uniform mat4 u_matrix; // world to clip transform
varying vec2 v_localPos; // position relative to point center
varying float v_rsrp;
void main() {
// Expand quad around point center
vec2 worldPos = a_pointCenter + a_position * a_pointRadius;
gl_Position = u_matrix * vec4(worldPos, 0.0, 1.0);
v_localPos = a_position; // -1 to 1
v_rsrp = a_pointRsrp;
}
```
**Fragment shader:**
```glsl
precision highp float;
varying vec2 v_localPos;
varying float v_rsrp;
void main() {
// Radial distance from center (0 at center, 1 at edge)
float dist = length(v_localPos);
// Discard outside circle
if (dist > 1.0) discard;
// Radial falloff (smooth at edges)
float weight = 1.0 - smoothstep(0.0, 1.0, dist);
// Or gaussian: weight = exp(-dist * dist * 2.0);
// Output: (weight * normalized_rsrp, weight)
float normalizedRsrp = (v_rsrp + 130.0) / 80.0; // -130 to -50 → 0 to 1
gl_FragColor = vec4(weight * normalizedRsrp, weight, 0.0, 1.0);
}
```
### Крок 3: Final compositing shader
```glsl
precision highp float;
uniform sampler2D u_accumTexture;
varying vec2 v_uv;
vec3 rsrpToColor(float t) {
// t: 0 = weak (red), 1 = strong (cyan)
if (t < 0.25) return mix(vec3(1.0, 0.0, 0.0), vec3(1.0, 0.5, 0.0), t / 0.25);
if (t < 0.5) return mix(vec3(1.0, 0.5, 0.0), vec3(1.0, 1.0, 0.0), (t - 0.25) / 0.25);
if (t < 0.75) return mix(vec3(1.0, 1.0, 0.0), vec3(0.0, 1.0, 0.0), (t - 0.5) / 0.25);
return mix(vec3(0.0, 1.0, 0.0), vec3(0.0, 1.0, 1.0), (t - 0.75) / 0.25);
}
void main() {
vec2 accum = texture2D(u_accumTexture, v_uv).rg;
float totalValue = accum.r;
float totalWeight = accum.g;
// No coverage
if (totalWeight < 0.001) discard;
// Weighted average RSRP
float avgRsrp = totalValue / totalWeight;
// Color mapping
vec3 color = rsrpToColor(avgRsrp);
// Alpha based on weight (fade at edges)
float alpha = smoothstep(0.0, 0.1, totalWeight) * 0.85;
gl_FragColor = vec4(color, alpha);
}
```
### Крок 4: Rendering loop
```typescript
function render() {
const canvas = canvasRef.current;
const gl = glRef.current;
// 1. Position canvas over map
const nw = map.latLngToLayerPoint([bounds.maxLat, bounds.minLon]);
const se = map.latLngToLayerPoint([bounds.minLat, bounds.maxLon]);
canvas.style.transform = `translate(${nw.x}px, ${nw.y}px)`;
canvas.style.width = `${se.x - nw.x}px`;
canvas.style.height = `${se.y - nw.y}px`;
// 2. Clear accumulation buffer
gl.bindFramebuffer(gl.FRAMEBUFFER, accumFramebuffer);
gl.clearColor(0, 0, 0, 0);
gl.clear(gl.COLOR_BUFFER_BIT);
// 3. Render points with additive blending
gl.useProgram(pointProgram);
gl.enable(gl.BLEND);
gl.blendFunc(gl.ONE, gl.ONE); // Additive
// Set uniforms (matrix, etc.)
const matrix = calculateWorldToClipMatrix(bounds, canvas.width, canvas.height);
gl.uniformMatrix4fv(u_matrix, false, matrix);
// Draw all points (instanced if supported, or batched)
drawPoints(gl, points);
// 4. Final composite pass
gl.bindFramebuffer(gl.FRAMEBUFFER, null);
gl.useProgram(compositeProgram);
gl.blendFunc(gl.SRC_ALPHA, gl.ONE_MINUS_SRC_ALPHA); // Normal blend
gl.activeTexture(gl.TEXTURE0);
gl.bindTexture(gl.TEXTURE_2D, accumTexture);
drawFullscreenQuad(gl);
}
```
---
## Оптимізації
### 1. Instanced rendering (якщо підтримується)
```typescript
const ext = gl.getExtension('ANGLE_instanced_arrays');
if (ext) {
// Use instanced rendering - draw all points in one call
ext.drawArraysInstancedANGLE(gl.TRIANGLE_STRIP, 0, 4, points.length);
}
```
### 2. Spatial culling
Малювати тільки точки що потрапляють у viewport:
```typescript
const visiblePoints = points.filter(p => {
const screenPos = map.latLngToContainerPoint([p.lat, p.lon]);
return screenPos.x > -radius && screenPos.x < canvas.width + radius &&
screenPos.y > -radius && screenPos.y < canvas.height + radius;
});
```
### 3. Dynamic radius based on zoom
```typescript
const zoom = map.getZoom();
const metersPerPixel = 40075016.686 * Math.cos(centerLat * Math.PI / 180) / Math.pow(2, zoom + 8);
const radiusPixels = (settings.resolution * 1.5) / metersPerPixel;
```
### 4. Resolution scaling
На низьких zoom рівнях, рендерити в менший framebuffer і upscale:
```typescript
const scale = zoom < 10 ? 0.5 : zoom < 12 ? 0.75 : 1.0;
const fbWidth = Math.round(canvas.width * scale);
const fbHeight = Math.round(canvas.height * scale);
```
---
## Порівняння з поточним texture-based
| Аспект | Texture-based | Radial gradients |
|--------|---------------|------------------|
| Візуалізація | Blocky | Smooth |
| Terrain detail | Добре | Менш точно |
| Performance | Швидко (1 draw call) | Повільніше (N points) |
| Memory | Texture size | Framebuffer + points |
| Код складність | Середня | Висока |
---
## Чеклист імплементації
### Phase 1: Basic setup
- [ ] Створити новий файл `WebGLRadialCoverageLayer.tsx`
- [ ] Setup WebGL context з float extensions
- [ ] Створити accumulation framebuffer
- [ ] Базовий vertex/fragment shader для точок
### Phase 2: Point rendering
- [ ] Implement point quad rendering
- [ ] Radial falloff function
- [ ] Additive blending
- [ ] Test з кількома точками
### Phase 3: Compositing
- [ ] Final pass shader
- [ ] Weighted average calculation
- [ ] Color mapping
- [ ] Alpha/transparency
### Phase 4: Integration
- [ ] Map positioning (як в поточному WebGL layer)
- [ ] Map event listeners (move/zoom)
- [ ] Opacity control
- [ ] Toggle в UI
### Phase 5: Optimization
- [ ] Instanced rendering
- [ ] Spatial culling
- [ ] Dynamic radius
- [ ] Resolution scaling
---
## Fallback
Якщо WebGL radial не працює (older GPU, missing extensions):
- Fallback до Canvas GeographicHeatmap
- Або до поточного texture-based WebGL
---
## Референси
1. [Mapbox GL Heatmap implementation](https://github.com/mapbox/mapbox-gl-js/blob/main/src/render/draw_heatmap.js)
2. [deck.gl HeatmapLayer](https://deck.gl/docs/api-reference/aggregation-layers/heatmap-layer)
3. [WebGL additive blending](https://webglfundamentals.org/webgl/lessons/webgl-text-texture.html)

View File

@@ -0,0 +1,281 @@
# RFCP v3.10.5: WebGL Smooth Coverage Implementation
## Контекст проблеми
**Поточний стан:**
- Backend повертає grid точок з lat/lon/RSRP (50m = 6,675 pts, 200m = 1,975 pts)
- WebGL texture-based rendering: points → texture → GL_LINEAR → colormap
- **Проблема:** Видимі grid squares/pixelation, особливо при zoom in або sparse grids (200m)
**Причина:**
- `GL_LINEAR` дає тільки C0 continuity (значення співпадають на краях, але похідні — ні)
- Це створює видимі "шви" між клітинками
## Рішення з ресерчу
### Ключовий інсайт
**Catmull-Rom spline interpolation** дає C1 continuity (smooth derivatives) І проходить через exact data values (на відміну від B-spline який blurs peaks).
**9-tap Catmull-Rom** замість `texture2D()`:
- 9 texture fetches замість 1
- ~0.32ms vs ~0.30ms на GTX 980 при 1920×1080
- Для нашої ~80×85 текстури — практично безкоштовно
### Критичне правило
**Інтерполювати RAW RSRP values ПЕРЕД colormap!**
- ❌ Неправильно: texture → colormap → interpolate (muddy colors)
- ✅ Правильно: texture → interpolate → colormap (clean gradients)
---
## Етап 1: Quick Fix (30 хвилин)
### Smoothstep coordinate remapping
Найшвидший спосіб прибрати grid edges — одна зміна в shader:
```glsl
// ЗАМІСТЬ:
vec4 texColor = texture2D(u_texture, v_uv);
// ВИКОРИСТАТИ:
vec4 textureSmooth(sampler2D tex, vec2 uv, vec2 texSize) {
vec2 p = uv * texSize + 0.5;
vec2 i = floor(p);
vec2 f = p - i;
f = f * f * f * (f * (f * 6.0 - 15.0) + 10.0); // quintic hermite
return texture2D(tex, (i + f - 0.5) / texSize);
}
// В main():
vec4 texColor = textureSmooth(u_texture, v_uv, u_textureSize);
```
**Що це дає:**
- C2 continuity з одним texture read
- Прибирає видимі grid edges
- Мінімальний positional bias
**Потрібно додати uniform:**
```javascript
const textureSizeLocation = gl.getUniformLocation(program, 'u_textureSize');
gl.uniform2f(textureSizeLocation, textureWidth, textureHeight);
```
---
## Етап 2: Production Implementation (1-2 години)
### 9-tap Catmull-Rom Shader
```glsl
precision highp float;
uniform sampler2D u_texture;
uniform vec2 u_textureSize;
uniform float u_opacity;
varying vec2 v_uv;
// Catmull-Rom 9-tap interpolation
// Source: TheRealMJP's gist (108 GitHub stars)
vec4 SampleTextureCatmullRom(sampler2D tex, vec2 uv, vec2 texSize) {
vec2 samplePos = uv * texSize;
vec2 texPos1 = floor(samplePos - 0.5) + 0.5;
vec2 f = samplePos - texPos1;
// Catmull-Rom weights
vec2 w0 = f * (-0.5 + f * (1.0 - 0.5 * f));
vec2 w1 = 1.0 + f * f * (-2.5 + 1.5 * f);
vec2 w2 = f * (0.5 + f * (2.0 - 1.5 * f));
vec2 w3 = f * f * (-0.5 + 0.5 * f);
// Combine weights for optimized sampling
vec2 w12 = w1 + w2;
vec2 offset12 = w2 / (w1 + w2);
// Compute texture coordinates
vec2 texPos0 = (texPos1 - 1.0) / texSize;
vec2 texPos3 = (texPos1 + 2.0) / texSize;
vec2 texPos12 = (texPos1 + offset12) / texSize;
// 9 texture fetches (optimized from 16)
vec4 result = vec4(0.0);
result += texture2D(tex, vec2(texPos0.x, texPos0.y)) * w0.x * w0.y;
result += texture2D(tex, vec2(texPos12.x, texPos0.y)) * w12.x * w0.y;
result += texture2D(tex, vec2(texPos3.x, texPos0.y)) * w3.x * w0.y;
result += texture2D(tex, vec2(texPos0.x, texPos12.y)) * w0.x * w12.y;
result += texture2D(tex, vec2(texPos12.x, texPos12.y)) * w12.x * w12.y;
result += texture2D(tex, vec2(texPos3.x, texPos12.y)) * w3.x * w12.y;
result += texture2D(tex, vec2(texPos0.x, texPos3.y)) * w0.x * w3.y;
result += texture2D(tex, vec2(texPos12.x, texPos3.y)) * w12.x * w3.y;
result += texture2D(tex, vec2(texPos3.x, texPos3.y)) * w3.x * w3.y;
return result;
}
// RSRP to color mapping (cyan -> green -> yellow -> orange -> red)
vec3 rsrpToColor(float rsrp) {
// rsrp: normalized 0.0 (weak, -110dBm) to 1.0 (strong, -50dBm)
// Color stops: red -> orange -> yellow -> green -> cyan
vec3 c0 = vec3(1.0, 0.0, 0.0); // red (weak)
vec3 c1 = vec3(1.0, 0.5, 0.0); // orange
vec3 c2 = vec3(1.0, 1.0, 0.0); // yellow
vec3 c3 = vec3(0.0, 1.0, 0.0); // green
vec3 c4 = vec3(0.0, 1.0, 1.0); // cyan (strong)
float t = clamp(rsrp, 0.0, 1.0);
if (t < 0.25) {
return mix(c0, c1, t / 0.25);
} else if (t < 0.5) {
return mix(c1, c2, (t - 0.25) / 0.25);
} else if (t < 0.75) {
return mix(c2, c3, (t - 0.5) / 0.25);
} else {
return mix(c3, c4, (t - 0.75) / 0.25);
}
}
void main() {
// 1. Sample with Catmull-Rom interpolation (RAW value)
vec4 texColor = SampleTextureCatmullRom(u_texture, v_uv, u_textureSize);
float rsrpNormalized = texColor.r;
// 2. Discard if no coverage (validity check)
if (rsrpNormalized < 0.01) {
discard;
}
// 3. Apply colormap AFTER interpolation
vec3 color = rsrpToColor(rsrpNormalized);
// 4. Smooth boundary fading (optional)
float boundaryAlpha = smoothstep(0.01, 0.05, rsrpNormalized);
gl_FragColor = vec4(color, boundaryAlpha * u_opacity);
}
```
### JavaScript зміни
```javascript
// 1. Vertex shader (без змін)
const vertexShaderSource = `
attribute vec2 a_position;
attribute vec2 a_texCoord;
varying vec2 v_uv;
void main() {
gl_Position = vec4(a_position, 0.0, 1.0);
v_uv = a_texCoord;
}
`;
// 2. При створенні texture — зберегти розміри
const textureWidth = gridWidth;
const textureHeight = gridHeight;
// 3. Передати uniform
const textureSizeLocation = gl.getUniformLocation(program, 'u_textureSize');
if (textureSizeLocation) {
gl.uniform2f(textureSizeLocation, textureWidth, textureHeight);
} else {
console.error('[WebGL] u_textureSize uniform NOT FOUND!');
}
// 4. Texture filtering — можна залишити LINEAR для fallback
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.LINEAR);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MAG_FILTER, gl.LINEAR);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_S, gl.CLAMP_TO_EDGE);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_T, gl.CLAMP_TO_EDGE);
```
---
## Етап 3: Texture Data Format
### Поточний формат (перевірити)
```javascript
// Normalized RSRP value (0-255 mapped to 0.0-1.0 in shader)
const normalized = (rsrp - minRsrp) / (maxRsrp - minRsrp);
const value = Math.round(normalized * 255);
// Store in R channel
textureData[idx] = value; // R = normalized RSRP
textureData[idx + 1] = value; // G (можна використати для validity mask)
textureData[idx + 2] = value; // B
textureData[idx + 3] = 255; // A = fully opaque
```
### Альтернатива: Float texture (краща точність)
```javascript
// Якщо браузер підтримує OES_texture_float
const ext = gl.getExtension('OES_texture_float');
if (ext) {
const floatData = new Float32Array(width * height);
for (const point of points) {
const normalized = (point.rsrp - minRsrp) / (maxRsrp - minRsrp);
floatData[gridY * width + gridX] = normalized;
}
gl.texImage2D(gl.TEXTURE_2D, 0, gl.LUMINANCE, width, height, 0,
gl.LUMINANCE, gl.FLOAT, floatData);
}
```
---
## Чеклист імплементації
### Phase 1: Quick Test (Smoothstep)
- [ ] Додати `u_textureSize` uniform
- [ ] Замінити `texture2D()` на `textureSmooth()`
- [ ] Тест на 50m і 200m
- [ ] Тест zoom in/out
### Phase 2: Production (Catmull-Rom)
- [ ] Імплементувати `SampleTextureCatmullRom()`
- [ ] Оновити colormap function
- [ ] Додати boundary fading
- [ ] Тест edge cases (краї текстури)
- [ ] Performance benchmark
### Phase 3: Polish
- [ ] Видалити старі CSS blur workarounds
- [ ] Видалити cellSize multiplication (не потрібно з Catmull-Rom)
- [ ] Cleanup debug logs
- [ ] Update version to v3.10.5
---
## Очікуваний результат
**До (GL_LINEAR):**
```
┌───┬───┬───┐
│ A │ B │ C │ ← Видимі краї між клітинками
├───┼───┼───┤ C0 continuity
│ D │ E │ F │
└───┴───┴───┘
```
**Після (Catmull-Rom):**
```
╭───────────────╮
│ ░░░▒▒▓▓██ │ ← Smooth gradient
│ ░░░▒▒▓▓██▓▓ │ C1 continuity
│ ░░▒▒▓▓██ │ Exact values at grid points
╰───────────────╯
```
---
## Референси
1. [TheRealMJP's 9-tap Catmull-Rom HLSL](https://gist.github.com/TheRealMJP/c83b8c0f46b63f3a88a5986f4fa982b1)
2. [Inigo Quilez - Better Texture Filtering](https://iquilezles.org/articles/texture/)
3. [2D Catmull-Rom in 4 samples - Shadertoy](https://www.shadertoy.com/view/4tyGDD)
4. [mapbox-gl-interpolate-heatmap](https://github.com/vinayakkulkarni/mapbox-gl-interpolate-heatmap)
5. [NVIDIA GPU Gems 2 - Fast Third-Order Texture Filtering](https://developer.nvidia.com/gpugems/gpugems2/part-iii-high-quality-rendering/chapter-20-fast-third-order-texture-filtering)

View File

@@ -0,0 +1,149 @@
# RFCP Session Summary — February 4, 2026
## GPU Acceleration Complete: 195s → 11.2s (17.4x Speedup)
---
## 🎯 Session Goal
Complete GPU acceleration pipeline and optimize Full preset performance.
## 📊 Results
### Performance Achievement
| Metric | Before (3.7.0) | After (3.8.0) | Improvement |
|--------|----------------|---------------|-------------|
| **Full preset** (6640 pts, 50m) | 195s | **11.2s** | **17.4x** |
| **Standard preset** (1975 pts, 200m) | 7.2s | **2.3s** (cached) | **3.1x** |
| Phase 2.5 (distances+path_loss) | 0.33s | **0.006s** | 55x |
| Phase 2.6 (terrain LOS) | 7.29s | **0.04s** | 182x |
| Per-point (workers) | 1.1ms | **0.1ms** | 11x |
### GPU Pipeline (Final Architecture)
```
Phase 1: OSM data fetch (Overpass API) ~6-10s (network)
Phase 2: Terrain tile download + cache ~4s first / 0s cached
Phase 2.5: GPU — distances + base path_loss 0.006s ⚡
Phase 2.6: GPU — terrain LOS + diffraction loss 0.04s ⚡
Phase 2.7: GPU — antenna pattern loss ~0s ⚡
Phase 3: CPU workers — buildings + vegetation ~2s
─────────────────────────────────────────────────
TOTAL (cached): ~2.3s (Standard)
TOTAL (cached): ~11.2s (Full)
```
---
## 🔧 Changes Made (Iterations 3.7.0 → 3.8.0)
### Iteration 3.7.0 — GPU Precompute Foundation
- Added `gpu_manager` import to `coverage_service.py`
- Grid arrays created on GPU (CuPy)
- GPU precompute for distances + path_loss (vectorized)
- Fixed critical bug: CuPy worker process crashes (CUDA context sharing)
- Solution: GPU only in main process, workers use precomputed CPU values
- Fixed frontend duplicate calculation guard
### Iteration 3.8.0 — Full Vectorization
- **Phase 2.6**: `batch_terrain_los()` in `gpu_service.py`
- Vectorized terrain profile sampling for ALL points simultaneously
- Earth curvature correction vectorized
- Fresnel clearance + diffraction loss vectorized
- **Phase 2.7**: `batch_antenna_pattern()` in `gpu_service.py`
- Workers receive precomputed `has_los`, `terrain_loss`, `antenna_loss`
- Workers only compute buildings + reflections + vegetation
### Critical Fix: `_batch_elevation_lookup` Vectorization
- **Before**: Python `for` loop over 59,250 coordinates (7.29s)
- **After**: Vectorized NumPy tile indexing, loop only over tiles (0.04s)
- **Impact**: 182x speedup on Phase 2.6 alone
### Critical Fix: Vegetation Bbox Pre-filter
- **Before**: Each sample point checked ALL 683 vegetation polygons
- **After**: Bounding box pre-filter skips 95%+ of polygons
- **Impact**: Full preset 156s → 11.2s
---
## 📁 Files Modified
### Backend
- `app/services/coverage_service.py` — precomputed values passthrough
- `app/services/parallel_coverage_service.py` — 5 worker functions updated
- `app/services/gpu_service.py` — batch_terrain_los, batch_antenna_pattern, batch_final_rsrp
- `app/services/vegetation_service.py` — bbox pre-filter on _point_in_vegetation
### Build
- PyInstaller ONEDIR build: 1.6 GB dist → 1.2 GB NSIS installer
- CUDA DLLs bundled (cublas, cusparse, curand, etc.)
- Runtime hook for DLL directory setup
---
## 🏗️ Architecture (Final State)
```
Main Process (asyncio event loop)
├── Phase 2.5: GPU precompute
│ └── CuPy arrays: distances, path_loss (vectorized)
├── Phase 2.6: GPU terrain LOS
│ └── Batch elevation lookup (vectorized NumPy)
│ └── Earth curvature + Fresnel (CuPy)
│ └── Diffraction loss (CuPy)
├── Phase 2.7: GPU antenna pattern
│ └── Bearing + pattern loss (CuPy)
└── Phase 3: CPU ProcessPool (3 workers)
└── Receive precomputed dict per point
└── Skip terrain/antenna (already computed)
└── Only: buildings + reflections + vegetation
└── Pure NumPy + CPU
```
**Key Rule**: GPU (CuPy) code ONLY in main process. Workers never import gpu_manager.
---
## 🎮 Side Activity: Dwarf Fortress Gamelog Analysis
Analyzed 102,669-line gamelog from fort "Lashderush (Prophethandle)":
- 8-9 years, 23 migrant waves, 1,943 masterpieces
- 51,599 combat actions, only 4 deaths (weredeer outbreak)
- Top crafter: Momuz Nëkorlibash (201 masterpieces)
- Sole survivor transforms between dwarf/weredeer
---
## 🔮 Next Steps
### Immediate
- [x] ~~GPU acceleration~~ ✅ COMPLETE
- [ ] SRTM terrain data integration (higher accuracy than current tiles)
- [ ] Session history persistence across app restarts
### Short Term
- [ ] Multi-station dashboard
- [ ] Project export/import (JSON)
- [ ] Link budget analysis view
### Medium Term
- [ ] LimeSDR hardware integration testing
- [ ] Real RF validation against field measurements
- [ ] 3D visualization mode
---
## 💡 Key Learnings
1. **Python for-loops are the enemy** — `_batch_elevation_lookup` went from 7.3s to 0.04s by replacing enumerate(zip()) with NumPy indexing
2. **Spatial pre-filtering is massive** — vegetation bbox check eliminated 95%+ of polygon tests
3. **GPU context can't be shared across processes** — spawn mode creates new CUDA contexts that OOM
4. **Vectorize in main, distribute to workers** — best pattern for GPU + multiprocessing
5. **Profile before optimizing** — Phase 2.6 bottleneck was invisible until measured
---
*Session duration: ~4 hours*
*Lines of code changed: ~300*
*Performance gain: 17.4x*
*Feeling: 🚀*

View File

@@ -0,0 +1,260 @@
# RFCP Session 2026-02-04 — Complete Development Log
**Session:** February 4, 2026 (afternoon/evening)
**Duration:** ~6 hours active development
**Iterations completed:** 3.9.0 → 3.9.1 → 3.10.0 → 3.10.1 → 3.10.2 → 3.10.3 → 3.10.4 (pending)
---
## What Was Done This Session
### Infrastructure: terra.eliah.one Tile Server ✅
- **DNS:** terra.eliah.one → 2.56.207.143 (VPS A, Hayhost)
- **Caddy:** File server with browse at /opt/terra/tiles/
- **SRTM3 (90m):** 187 tiles, 514.5 MB — full Ukraine (N44-N51, E018-E041)
- **SRTM1 (30m):** 160 tiles, 3,957.3 MB — full Ukraine (N44-N51, E022-E041)
- **Sources:** viewfinderpanoramas.org (SRTM3, void-filled), AWS S3 elevation-tiles-prod (SRTM1)
- **Index:** /api/index → tile_index.json (version 2, dual dataset)
- **Public access verified:** https://terra.eliah.one/srtm1/ and /srtm3/
### Iteration 3.9.1: Terra Integration ✅
- terrain_service.py updated with prioritized SRTM sources:
1. terra.eliah.one/srtm1/ (30m, preferred)
2. terra.eliah.one/srtm3/ (90m, fallback)
3. AWS S3 skadi mirror (public fallback)
- New endpoints: /api/terrain/status, /api/terrain/download, /api/terrain/index
- Auto-downloads tiles on first use, cached permanently on disk
- 173 tiles loaded (4,278.6 MB) confirmed in Data Cache panel
### Iteration 3.10.0: Link Budget + Fresnel Zone + Interference ✅
- **Link Budget Calculator:** Full TX→RX path analysis panel
- EIRP calculation, FSPL, terrain loss, received power, link margin
- RX point placement on map (orange marker, dashed line)
- ✓ LINK OK / ✗ FAIL status with margin display
- **Fresnel Zone Visualization:** On Terrain Profile chart
- First Fresnel zone ellipse overlay (semi-transparent)
- Red highlighting where terrain intrudes zone
- Frequency-aware (zone size changes with MHz)
- Clearance calculation with recommendation text
- **Interference Modeling (C/I):** Backend ready
- Carrier-to-interference ratio per grid point
- Co-frequency site grouping
- GPU-accelerated (CuPy vectorized)
### Iteration 3.10.1: UI Bugfixes (partial) ✅
- Elevation opacity control
- Data Cache panel with region downloads
- Various dark theme text fixes
### Iteration 3.10.2: Tool Mode System ✅
- **ActiveTool state:** 'none' | 'ruler' | 'rx-placement' | 'site-placement'
- Single map click handler dispatches to active tool
- Cursor management (default/crosshair/cell per tool)
- Ruler snap-to-site (20px threshold)
- Event propagation fixes (partial — terrain profile still leaks)
### Iteration 3.10.3: Calculator Button + Ruler Limit ✅
- Calculator button added to right toolbar
- Ruler limited to 2 points max (point-to-point only)
- Third click starts new measurement
### Iteration 3.10.4: Pending Fixes 🔧
- Terrain Profile click-through (needs stopImmediatePropagation on native event)
- TX Height hardcoded to 2m in Link Budget (should read from site config)
---
## Current State — What Works
### Core Features ✅
- Multi-site RF coverage planning with multi-sector antennas
- GPU-accelerated coverage calculation (RTX 4060, CuPy/CUDA)
- 10 propagation models (Free-Space, terrain_los, buildings, materials, dominant_path, street_canyon, reflections, water_reflection, vegetation, atmospheric)
- Performance: 11.2s Full preset (17.4x speedup from v3.8.0)
- Geographic-scale heatmap with Leaflet tile rendering
### Terrain Integration ✅
- SRTM elevation data (30m and 90m resolution)
- Bilinear interpolation for sub-pixel accuracy
- Memory-mapped I/O with LRU cache (20 tiles)
- Auto-detection SRTM1 vs SRTM3 by file size
- Terrain-aware coverage calculation (Line of Sight, terrain loss)
- Terrain Profile viewer with elevation chart
### Analysis Tools ✅
- **Link Budget Calculator** — point-to-point path analysis
- **Fresnel Zone Visualization** — on terrain profile chart
- **Ruler/Distance Measurement** — 2-point with snap-to-site
- **Terrain Profile** — elevation cross-section between 2 points
- **Coverage Statistics** — Excellent/Good/Fair/Weak breakdown
- **Session History** — compare calculation runs
### Data Management ✅
- Export: CSV, GeoJSON coverage data
- Import/Export: Site configurations (JSON)
- Data Cache: Regional tile pre-download (Ukraine, Eastern Ukraine, Donbas, Central, Western, Kyiv)
- 173 terrain tiles (4.3 GB) cached locally
### Infrastructure ✅
- Frontend: React 18 + TypeScript + Vite + Leaflet
- Backend: Python FastAPI + CuPy GPU pipeline
- Tile Server: terra.eliah.one (Caddy file_server)
- Packaging: PyInstaller + Electron (Windows installer)
- Desktop app: RFCP - RF Coverage Planner (native window)
---
## Known Bugs (for 3.10.4+)
| # | Bug | Severity | Root Cause |
|---|-----|----------|------------|
| 1 | Terrain Profile click places ruler point | Medium | stopPropagation not blocking Leaflet's native DOM listener. Need `e.nativeEvent.stopImmediatePropagation()` or move popup outside Leaflet container |
| 2 | TX Height shows 2m in Link Budget | Low | Hardcoded default, not reading from site config field |
| 3 | Cursor still shows hand in some cases | Low | Leaflet default grab cursor not fully overridden |
| 4 | Elevation Colors opacity slider | Low | May need correct layer reference binding |
---
## Roadmap — Updated February 4, 2026
### ✅ COMPLETED (Iterations 1-3.10.3)
**Phase 1: Foundation** (Dec 2024)
- React + TypeScript + Vite + Leaflet setup
- Basic site management, coverage calculation
**Phase 2: Core Features** (Jan 2025, Iterations 1-10.1)
- Multi-site, multi-sector, geographic heatmap
- Coverage statistics, keyboard shortcuts
- Code audit, production polish
**Phase 3: GPU Acceleration** (Feb 2-3, 2026, Iterations 3.1-3.8)
- CuPy/CUDA pipeline: 195s → 11.2s (17.4x)
- PyInstaller build with CUDA bundling
- Windows native backend (no WSL2)
**Phase 4: Terrain Integration** (Feb 4, 2026, Iterations 3.9-3.10)
- SRTM tile server (terra.eliah.one)
- 347 tiles, 4.5 GB, full Ukraine coverage
- Terrain-aware propagation, terrain profiles
- Link budget calculator, Fresnel zones
- Tool mode system, interference modeling
### 🔧 REMAINING ON CURRENT STACK
**3.10.4: Final Bugfixes** (1-2 hours)
- Terrain Profile click propagation fix
- TX Height from site config
- Cursor cleanup
- Elevation opacity fix
**3.11: Polish & QA** (optional, 2-3 hours)
- Interference C/I heatmap toggle on frontend
- Coverage comparison mode (before/after)
- Keyboard shortcuts help modal (?)
- Settings persistence (localStorage)
- Input validation improvements
**3.12: Offline Package** (optional, 2-3 hours)
- SRTM3 tiles bundled in installer (~180 MB gzipped)
- SRTM1 as optional "HD Terrain Pack" download
- First-run extraction to data/terrain/
- Full offline operation without internet
### 🔮 FUTURE (New Stack — When Inspired)
**Stack Migration: Tauri + SvelteKit + Rust**
- Native performance without Electron overhead
- Rust backend replacing Python FastAPI
- GPU compute via wgpu or Vulkan
- Smaller installer (<100 MB vs current ~1.6 GB)
- Already tested Tauri for UMTC Wiki project
**Advanced RF Features:**
- 3D terrain visualization (Three.js or WebGPU)
- Drive test data import and comparison
- Multiple frequency band planning
- Custom propagation model editor
- Real-time collaboration (via Matrix?)
**Field Deployment:**
- Live USB with BitLocker encryption
- Offline-first with full Ukraine terrain
- Integration with UMTC tactical mesh
- LoRa/IoT device position planning
---
## Tech Specs Quick Reference
### Backend
```
Location: D:\root\rfcp\backend
Framework: FastAPI + Uvicorn
GPU: CuPy + CUDA (RTX 4060)
Python: 3.x with numpy, scipy, httpx
Build: PyInstaller ONEDIR (~1.6 GB with CUDA)
Start: python -m uvicorn app.main:app --host 0.0.0.0 --port 8000
```
### Frontend
```
Location: D:\root\rfcp\frontend
Framework: React 18 + TypeScript + Vite
Map: Leaflet + custom geographic heatmap
State: Zustand
Build: npm run build → dist/
Bundle: 163KB gzipped
```
### Tile Server
```
Domain: terra.eliah.one
Server: VPS A (2.56.207.143), Caddy file_server
Path: /opt/terra/tiles/srtm1/ and /opt/terra/tiles/srtm3/
Index: /api/index → tile_index.json
Health: /health → "ok"
Tiles: 187 SRTM3 (515 MB) + 160 SRTM1 (3.9 GB)
```
### Key Files
```
terrain_service.py — SRTM tile loading, bilinear interpolation, elevation profiles
gpu_service.py — CuPy/CUDA coverage calculation pipeline
coverage_service.py — Propagation models, coverage orchestration
routes/terrain.py — /api/terrain/status, /download, /index
routes/coverage.py — /api/link-budget, /api/fresnel-profile
frontend/src/store/tools.ts — ActiveTool state management
frontend/src/components/panels/LinkBudgetPanel.tsx
frontend/src/components/map/TerrainProfile.tsx
frontend/src/components/map/MeasurementTool.tsx
```
---
## Performance Benchmarks
| Preset | Resolution | Points | Time | GPU |
|--------|-----------|--------|------|-----|
| Standard | 200m | 1,975 | 7.4s | ✅ |
| Full | 50m | 6,639-6,662 | 11.2-11.7s | ✅ |
| 50km radius | 200m | 4,966 | ~30s | ✅ |
**GPU:** NVIDIA RTX 4060 (CUDA)
**Speedup:** 17.4x vs CPU-only (v3.7.0 baseline)
---
## Session Notes
Продуктивна сесія. За ~6 годин:
- Підняли tile server з нуля (terra.eliah.one)
- 347 тайлів terrain data для всієї України
- Інтегрували terrain в backend (auto-download, status API)
- Додали Link Budget Calculator, Fresnel Zone, Interference modeling
- Впровадили Tool Mode System для вирішення click conflicts
- Виправили купу UX багів
Продукт близький до завершення на поточному стеку. Основна функціональність працює, залишились polish баги та optional фічі. Рефактор на Tauri+SvelteKit+Rust — коли буде натхнення, не терміново.
Half Sword скачаний і чекає. 🗡️

View File

@@ -0,0 +1,193 @@
# RFCP v3.10.5 Session Summary - 2026-02-06
## Що зробили сьогодні
### 1. WebGL Texture-Based Coverage (ЗАВЕРШЕНО ✅)
**Проблема:** Canvas heatmap був blocky, хотіли smooth interpolation.
**Рішення:** Texture-based WebGL з smoothstep shader + nearest neighbor fill.
**Файл:** `frontend/src/components/map/WebGLCoverageLayer.tsx`
**Як працює:**
1. Створюємо texture де кожен pixel = RSRP value
2. Nearest neighbor fill для заповнення gaps (circular coverage → rectangular texture)
3. Smoothstep shader для C2 continuity interpolation
4. Colormap applied AFTER interpolation
**Статус:** Працює, але все ще blocky на zoom in через nearest neighbor fill.
---
### 2. WebGL Radial Gradients Coverage (В ПРОЦЕСІ 🔄)
**Мета:** Красиві smooth gradients як Canvas heatmap, але GPU-accelerated.
**Файл:** `frontend/src/components/map/WebGLRadialCoverageLayer.tsx`
**Як працює:**
1. Кожна точка = quad з Gaussian radial falloff
2. Additive blending в float framebuffer: (weight × rsrp, weight)
3. Final composite pass: normalize (R/G) + colormap
**Поточний статус:**
- ✅ Framebuffer створюється правильно
- ✅ Points рендеряться (framebuffer має дані)
- ✅ Composite pass працює (final pixel має колір)
- ✅ 50m показує beautiful smooth gradients!
- ✅ 200m тепер теж показує (після radius fix)
- ⚠️ Coverage radius не повний (обрізається раніше ніж 10km)
- ⚠️ Темне коло на периферії (falloff занадто різкий?)
- ⚠️ Selector dropdown сірий на білому (CSS issue)
---
### 3. Coverage Renderer Selector (ЗАВЕРШЕНО ✅)
**Файл:** `frontend/src/store/settings.ts`
**Додано:** `coverageRenderer: 'webgl-radial' | 'webgl-texture' | 'canvas'`
**UI:** Dropdown в Coverage Settings panel
**Fallback chain:**
- Radial fails → Texture
- Texture fails → Canvas
---
## Залишилось зробити (Next Session)
### Priority 1: Fix Radial Coverage Radius
**Симптом:** Coverage не покриває повні 10km, обрізається раніше.
**Можливі причини:**
1. Canvas bounds не включають padding для point radius
2. Points на краю мають gradient що виходить за canvas
3. Normalized coordinates calculation wrong at edges
**Debug:**
```javascript
// Перевірити bounds vs actual coverage extent
console.log('Canvas bounds:', bounds);
console.log('Points extent:', {
minLat: Math.min(...points.map(p => p.lat)),
maxLat: Math.max(...points.map(p => p.lat)),
// ...
});
```
**Fix approach:**
1. Додати padding до canvas bounds = point radius
2. Або clip points що виходять за межі
---
### Priority 2: Fix Dark Ring on Periphery
**Симптом:** Темне коло на краю coverage area.
**Причина:** Точки на периферії мають менше сусідів → менший total weight → темніший колір після normalization.
**Fix options:**
1. Збільшити radius multiplier (3.0× замість 2.5×)
2. Або додати edge detection і boost alpha там
3. Або використати min weight threshold перед normalization
---
### Priority 3: Fix Selector Dropdown Styling
**Симптом:** Сірий текст на білому фоні (погано видно).
**Fix:** Update CSS classes в App.tsx для dropdown.
---
### Priority 4: Performance Testing
Протестувати з великою кількістю точок:
- 10,000+ points
- 50,000+ points
- Measure frame time
Якщо повільно — implement instanced rendering.
---
## Files Changed Today
```
frontend/src/components/map/
├── WebGLCoverageLayer.tsx # Texture-based (updated with NN fill)
├── WebGLRadialCoverageLayer.tsx # NEW - Radial gradients
└── GeographicHeatmap.tsx # Canvas fallback (unchanged)
frontend/src/store/
└── settings.ts # Added coverageRenderer option
frontend/src/
└── App.tsx # Integrated renderer selector
```
---
## Console Debug Commands
```javascript
// Check which renderer is active
document.querySelectorAll('canvas').forEach(c =>
console.log(c.className, c.width, c.height)
);
// Check WebGL errors
const canvas = document.querySelector('.webgl-radial-coverage');
const gl = canvas?.getContext('webgl');
console.log('WebGL error:', gl?.getError());
// Read center pixel
gl?.readPixels(canvas.width/2, canvas.height/2, 1, 1, gl.RGBA, gl.UNSIGNED_BYTE, new Uint8Array(4));
```
---
## Key Insights Learned
1. **Texture-based vs Radial:** Texture good for terrain detail accuracy, Radial good for beautiful visualization.
2. **Float framebuffer:** Need `EXT_color_buffer_float` extension. Fallback: use RGBA8 with encoding.
3. **Additive blending:** `gl.blendFunc(gl.ONE, gl.ONE)` for accumulation, then `gl.blendFunc(gl.SRC_ALPHA, gl.ONE_MINUS_SRC_ALPHA)` for final composite.
4. **Weighted average in shader:** Store (weight × value, weight), then normalize: value = R / G.
5. **Radius scaling:** Higher resolution = more points = smaller radius. Lower resolution = fewer points = larger radius to compensate.
---
## Git Status
- ✅ Pushed working WebGL texture-based coverage
- 🔄 WebGL radial in progress (functional but incomplete)
---
## Next Session Start Point
1. Відкрити RFCP project
2. `npm run dev` в frontend
3. Test radial coverage з 50m і 200m
4. Fix radius issue (Priority 1)
5. Fix dark ring (Priority 2)
6. Polish UI (Priority 3)
---
## Session Stats
- **Duration:** ~6 hours
- **Iterations:** 15+ fix attempts
- **Final result:** Working radial gradients renderer (90% complete)
- **Key breakthrough:** Discovering framebuffer had data but composite pass wasn't reading it

View File

@@ -1194,19 +1194,6 @@
"linux" "linux"
] ]
}, },
"node_modules/@rollup/rollup-linux-x64-gnu": {
"version": "4.57.0",
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-gnu/-/rollup-linux-x64-gnu-4.57.0.tgz",
"integrity": "sha512-OR5p5yG5OKSxHReWmwvM0P+VTPMwoBS45PXTMYaskKQqybkS3Kmugq1W+YbNWArF8/s7jQScgzXUhArzEQ7x0A==",
"cpu": [
"x64"
],
"dev": true,
"optional": true,
"os": [
"linux"
]
},
"node_modules/@rollup/rollup-linux-x64-musl": { "node_modules/@rollup/rollup-linux-x64-musl": {
"version": "4.57.0", "version": "4.57.0",
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-musl/-/rollup-linux-x64-musl-4.57.0.tgz", "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-musl/-/rollup-linux-x64-musl-4.57.0.tgz",
@@ -3449,6 +3436,20 @@
"fsevents": "~2.3.2" "fsevents": "~2.3.2"
} }
}, },
"node_modules/rollup/node_modules/@rollup/rollup-linux-x64-gnu": {
"version": "4.57.0",
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-gnu/-/rollup-linux-x64-gnu-4.57.0.tgz",
"integrity": "sha512-OR5p5yG5OKSxHReWmwvM0P+VTPMwoBS45PXTMYaskKQqybkS3Kmugq1W+YbNWArF8/s7jQScgzXUhArzEQ7x0A==",
"cpu": [
"x64"
],
"dev": true,
"license": "MIT",
"optional": true,
"os": [
"linux"
]
},
"node_modules/scheduler": { "node_modules/scheduler": {
"version": "0.27.0", "version": "0.27.0",
"resolved": "https://registry.npmjs.org/scheduler/-/scheduler-0.27.0.tgz", "resolved": "https://registry.npmjs.org/scheduler/-/scheduler-0.27.0.tgz",

View File

@@ -6,6 +6,7 @@ import { useSitesStore } from '@/store/sites.ts';
import { useCoverageStore } from '@/store/coverage.ts'; import { useCoverageStore } from '@/store/coverage.ts';
import { useSettingsStore } from '@/store/settings.ts'; import { useSettingsStore } from '@/store/settings.ts';
import { useHistoryStore, pushToFuture, pushToPast } from '@/store/history.ts'; import { useHistoryStore, pushToFuture, pushToPast } from '@/store/history.ts';
import { useToolStore } from '@/store/tools.ts';
import { useToastStore } from '@/components/ui/Toast.tsx'; import { useToastStore } from '@/components/ui/Toast.tsx';
import { useKeyboardShortcuts } from '@/hooks/useKeyboardShortcuts.ts'; import { useKeyboardShortcuts } from '@/hooks/useKeyboardShortcuts.ts';
import { useUnsavedChanges } from '@/hooks/useUnsavedChanges.ts'; import { useUnsavedChanges } from '@/hooks/useUnsavedChanges.ts';
@@ -13,6 +14,8 @@ import { logger } from '@/utils/logger.ts';
import { db } from '@/db/schema.ts'; import { db } from '@/db/schema.ts';
import MapView from '@/components/map/Map.tsx'; import MapView from '@/components/map/Map.tsx';
import GeographicHeatmap from '@/components/map/GeographicHeatmap.tsx'; import GeographicHeatmap from '@/components/map/GeographicHeatmap.tsx';
import WebGLCoverageLayer from '@/components/map/WebGLCoverageLayer.tsx';
import WebGLRadialCoverageLayer from '@/components/map/WebGLRadialCoverageLayer.tsx';
import CoverageBoundary from '@/components/map/CoverageBoundary.tsx'; import CoverageBoundary from '@/components/map/CoverageBoundary.tsx';
import HeatmapLegend from '@/components/map/HeatmapLegend.tsx'; import HeatmapLegend from '@/components/map/HeatmapLegend.tsx';
import SiteList from '@/components/panels/SiteList.tsx'; import SiteList from '@/components/panels/SiteList.tsx';
@@ -20,6 +23,7 @@ import ExportPanel from '@/components/panels/ExportPanel.tsx';
import ProjectPanel from '@/components/panels/ProjectPanel.tsx'; import ProjectPanel from '@/components/panels/ProjectPanel.tsx';
import CoverageStats from '@/components/panels/CoverageStats.tsx'; import CoverageStats from '@/components/panels/CoverageStats.tsx';
import HistoryPanel from '@/components/panels/HistoryPanel.tsx'; import HistoryPanel from '@/components/panels/HistoryPanel.tsx';
import BatchFrequencyChange from '@/components/panels/BatchFrequencyChange.tsx';
import ResultsPanel from '@/components/panels/ResultsPanel.tsx'; import ResultsPanel from '@/components/panels/ResultsPanel.tsx';
import SiteImportExport from '@/components/panels/SiteImportExport.tsx'; import SiteImportExport from '@/components/panels/SiteImportExport.tsx';
import { SiteConfigModal } from '@/components/modals/index.ts'; import { SiteConfigModal } from '@/components/modals/index.ts';
@@ -28,6 +32,8 @@ import ToastContainer from '@/components/ui/Toast.tsx';
import ThemeToggle from '@/components/ui/ThemeToggle.tsx'; import ThemeToggle from '@/components/ui/ThemeToggle.tsx';
import GPUIndicator from '@/components/ui/GPUIndicator.tsx'; import GPUIndicator from '@/components/ui/GPUIndicator.tsx';
import TerrainProfile from '@/components/map/TerrainProfile.tsx'; import TerrainProfile from '@/components/map/TerrainProfile.tsx';
import LinkBudgetPanel from '@/components/panels/LinkBudgetPanel.tsx';
import LinkBudgetOverlay from '@/components/map/LinkBudgetOverlay.tsx';
import Button from '@/components/ui/Button.tsx'; import Button from '@/components/ui/Button.tsx';
import NumberInput from '@/components/ui/NumberInput.tsx'; import NumberInput from '@/components/ui/NumberInput.tsx';
import ConfirmDialog from '@/components/ui/ConfirmDialog.tsx'; import ConfirmDialog from '@/components/ui/ConfirmDialog.tsx';
@@ -59,7 +65,7 @@ async function restoreSites(snapshot: Site[]) {
export default function App() { export default function App() {
const loadSites = useSitesStore((s) => s.loadSites); const loadSites = useSitesStore((s) => s.loadSites);
const sites = useSitesStore((s) => s.sites); const sites = useSitesStore((s) => s.sites);
const setPlacingMode = useSitesStore((s) => s.setPlacingMode); const selectedSiteId = useSitesStore((s) => s.selectedSiteId);
const coverageResult = useCoverageStore((s) => s.result); const coverageResult = useCoverageStore((s) => s.result);
const isCalculating = useCoverageStore((s) => s.isCalculating); const isCalculating = useCoverageStore((s) => s.isCalculating);
@@ -109,15 +115,20 @@ export default function App() {
const setTerrainOpacity = useSettingsStore((s) => s.setTerrainOpacity); const setTerrainOpacity = useSettingsStore((s) => s.setTerrainOpacity);
const showGrid = useSettingsStore((s) => s.showGrid); const showGrid = useSettingsStore((s) => s.showGrid);
const setShowGrid = useSettingsStore((s) => s.setShowGrid); const setShowGrid = useSettingsStore((s) => s.setShowGrid);
const measurementMode = useSettingsStore((s) => s.measurementMode);
const setMeasurementMode = useSettingsStore((s) => s.setMeasurementMode);
const showElevationInfo = useSettingsStore((s) => s.showElevationInfo); const showElevationInfo = useSettingsStore((s) => s.showElevationInfo);
// Tool store (centralized active tool state)
const activeTool = useToolStore((s) => s.activeTool);
const setActiveTool = useToolStore((s) => s.setActiveTool);
const clearTool = useToolStore((s) => s.clearTool);
const setShowElevationInfo = useSettingsStore((s) => s.setShowElevationInfo); const setShowElevationInfo = useSettingsStore((s) => s.setShowElevationInfo);
const showBoundary = useSettingsStore((s) => s.showBoundary); const showBoundary = useSettingsStore((s) => s.showBoundary);
const showElevationOverlay = useSettingsStore((s) => s.showElevationOverlay); const showElevationOverlay = useSettingsStore((s) => s.showElevationOverlay);
const setShowElevationOverlay = useSettingsStore((s) => s.setShowElevationOverlay); const setShowElevationOverlay = useSettingsStore((s) => s.setShowElevationOverlay);
const elevationOpacity = useSettingsStore((s) => s.elevationOpacity); const elevationOpacity = useSettingsStore((s) => s.elevationOpacity);
const setElevationOpacity = useSettingsStore((s) => s.setElevationOpacity); const setElevationOpacity = useSettingsStore((s) => s.setElevationOpacity);
const coverageRenderer = useSettingsStore((s) => s.coverageRenderer);
const setCoverageRenderer = useSettingsStore((s) => s.setCoverageRenderer);
// History (undo/redo) // History (undo/redo)
const canUndo = useHistoryStore((s) => s.canUndo); const canUndo = useHistoryStore((s) => s.canUndo);
@@ -136,6 +147,8 @@ export default function App() {
const [showShortcuts, setShowShortcuts] = useState(false); const [showShortcuts, setShowShortcuts] = useState(false);
const [kbDeleteTarget, setKbDeleteTarget] = useState<{ id: string; name: string } | null>(null); const [kbDeleteTarget, setKbDeleteTarget] = useState<{ id: string; name: string } | null>(null);
const [profileEndpoints, setProfileEndpoints] = useState<{ start: [number, number]; end: [number, number] } | null>(null); const [profileEndpoints, setProfileEndpoints] = useState<{ start: [number, number]; end: [number, number] } | null>(null);
const [showLinkBudget, setShowLinkBudget] = useState(false);
const [linkBudgetRxPoint, setLinkBudgetRxPoint] = useState<{ lat: number; lon: number } | null>(null);
// Region wizard for first-run (desktop mode only) // Region wizard for first-run (desktop mode only)
const [showWizard, setShowWizard] = useState(false); const [showWizard, setShowWizard] = useState(false);
@@ -212,17 +225,26 @@ export default function App() {
loadSites(); loadSites();
}, [loadSites]); }, [loadSites]);
// Handle map click -> open modal with coordinates // Handle site placement from map click
const handleMapClick = useCallback( const handleSitePlacement = useCallback(
(lat: number, lon: number) => { (lat: number, lon: number) => {
setModalState({ setModalState({
isOpen: true, isOpen: true,
mode: 'create', mode: 'create',
initialData: { lat, lon }, initialData: { lat, lon },
}); });
setPlacingMode(false); // Tool store clearTool() is called by MapClickHandler after placement
}, },
[setPlacingMode] []
);
// Handle RX point placement for Link Budget
const handleRxPlacement = useCallback(
(lat: number, lon: number) => {
setLinkBudgetRxPoint({ lat, lon });
// Tool store clearTool() is called by MapClickHandler after placement
},
[]
); );
const handleEditSite = useCallback((site: Site) => { const handleEditSite = useCallback((site: Site) => {
@@ -443,11 +465,14 @@ export default function App() {
); );
} else { } else {
const timeStr = result.calculationTime.toFixed(1); const timeStr = result.calculationTime.toFixed(1);
const firstSite = sites.find((s) => s.visible);
const freqStr = firstSite ? ` \u2022 ${firstSite.frequency} MHz` : '';
const presetStr = settings.preset ? ` \u2022 ${settings.preset}` : '';
const modelsStr = result.modelsUsed?.length const modelsStr = result.modelsUsed?.length
? ` ${result.modelsUsed.length} models` ? ` \u2022 ${result.modelsUsed.length} models`
: ''; : '';
addToast( addToast(
`Calculated ${result.totalPoints.toLocaleString()} points in ${timeStr}s${modelsStr}`, `${result.totalPoints.toLocaleString()} pts \u2022 ${timeStr}s${presetStr}${freqStr}${modelsStr}`,
'success' 'success'
); );
} }
@@ -480,7 +505,7 @@ export default function App() {
return ( return (
<div className="h-screen w-screen flex flex-col bg-gray-100 dark:bg-dark-bg"> <div className="h-screen w-screen flex flex-col bg-gray-100 dark:bg-dark-bg">
{/* Header */} {/* Header */}
<header className="bg-slate-800 dark:bg-slate-900 text-white px-4 py-2 flex items-center justify-between flex-shrink-0 z-10"> <header className="bg-slate-800 dark:bg-slate-900 text-white px-4 py-2 flex items-center justify-between flex-shrink-0 z-[1010]">
<div className="flex items-center gap-2"> <div className="flex items-center gap-2">
<span className="text-base font-bold">RFCP</span> <span className="text-base font-bold">RFCP</span>
<span className="text-xs text-slate-400 hidden sm:inline"> <span className="text-xs text-slate-400 hidden sm:inline">
@@ -664,30 +689,83 @@ export default function App() {
{/* Map */} {/* Map */}
<div className="flex-1 relative"> <div className="flex-1 relative">
<MapView <MapView
onMapClick={handleMapClick} onSitePlacement={handleSitePlacement}
onRxPlacement={handleRxPlacement}
onEditSite={handleEditSite} onEditSite={handleEditSite}
onProfileRequest={(start, end) => setProfileEndpoints({ start, end })} onProfileRequest={(start, end) => setProfileEndpoints({ start, end })}
showLinkBudget={showLinkBudget}
onToggleLinkBudget={() => setShowLinkBudget(!showLinkBudget)}
> >
{/* Show partial results during tiled calculation, or final result */} {/* Show partial results during tiled calculation, or final result */}
{(coverageResult || (isCalculating && partialPoints.length > 0)) && ( {(coverageResult || (isCalculating && partialPoints.length > 0)) && (
<> <>
<GeographicHeatmap {/* Render coverage layer based on selected renderer */}
points={isCalculating && partialPoints.length > 0 ? partialPoints : (coverageResult?.points ?? [])} {coverageRenderer === 'webgl-radial' && (
visible={heatmapVisible} <WebGLRadialCoverageLayer
opacity={settings.heatmapOpacity} key="webgl-radial-coverage"
radiusMeters={settings.heatmapRadius} points={isCalculating && partialPoints.length > 0 ? partialPoints : (coverageResult?.points ?? [])}
rsrpThreshold={settings.rsrpThreshold} visible={heatmapVisible}
/> opacity={settings.heatmapOpacity}
minRsrp={-130}
maxRsrp={-50}
radiusMeters={settings.heatmapRadius}
onWebGLFailed={() => setCoverageRenderer('webgl-texture')}
/>
)}
{coverageRenderer === 'webgl-texture' && (
<WebGLCoverageLayer
key="webgl-coverage"
points={isCalculating && partialPoints.length > 0 ? partialPoints : (coverageResult?.points ?? [])}
visible={heatmapVisible}
opacity={settings.heatmapOpacity}
minRsrp={-130}
maxRsrp={-50}
onWebGLFailed={() => setCoverageRenderer('canvas')}
/>
)}
{coverageRenderer === 'canvas' && (
<GeographicHeatmap
key="canvas-coverage"
points={isCalculating && partialPoints.length > 0 ? partialPoints : (coverageResult?.points ?? [])}
visible={heatmapVisible}
opacity={settings.heatmapOpacity}
radiusMeters={settings.heatmapRadius}
rsrpThreshold={settings.rsrpThreshold}
/>
)}
{coverageResult && ( {coverageResult && (
<CoverageBoundary <CoverageBoundary
points={coverageResult.points.filter(p => p.rsrp >= settings.rsrpThreshold)} points={coverageResult.points.filter(p => p.rsrp >= settings.rsrpThreshold)}
visible={showBoundary} visible={showBoundary}
resolution={settings.resolution} resolution={settings.resolution}
boundary={coverageResult.boundary}
/> />
)} )}
</> </>
)} )}
{/* Link Budget TX-RX overlay */}
{showLinkBudget && linkBudgetRxPoint && (() => {
const txSite = sites.find(s => s.id === selectedSiteId);
return (
<LinkBudgetOverlay
txPoint={txSite ? { lat: txSite.lat, lon: txSite.lon } : null}
rxPoint={linkBudgetRxPoint}
onRxDrag={(lat, lon) => setLinkBudgetRxPoint({ lat, lon })}
/>
);
})()}
</MapView> </MapView>
{activeTool === 'rx-placement' && (
<div className="absolute top-4 left-1/2 -translate-x-1/2 z-[2000] bg-blue-600 text-white px-4 py-2 rounded-lg shadow-lg text-sm font-medium flex items-center gap-2">
<span>Click on map to set RX point</span>
<button
onClick={() => clearTool()}
className="text-white/70 hover:text-white ml-2"
>
Cancel
</button>
</div>
)}
<HeatmapLegend /> <HeatmapLegend />
<ResultsPanel /> <ResultsPanel />
{profileEndpoints && ( {profileEndpoints && (
@@ -697,6 +775,19 @@ export default function App() {
onClose={() => setProfileEndpoints(null)} onClose={() => setProfileEndpoints(null)}
/> />
)} )}
{showLinkBudget && (
<div className="absolute top-20 left-4 z-[1500]">
<LinkBudgetPanel
rxPoint={linkBudgetRxPoint}
onRequestMapClick={() => setActiveTool('rx-placement')}
onClose={() => {
setShowLinkBudget(false);
clearTool();
setLinkBudgetRxPoint(null);
}}
/>
</div>
)}
</div> </div>
{/* Side panel */} {/* Side panel */}
@@ -728,6 +819,11 @@ export default function App() {
{/* Site list */} {/* Site list */}
<SiteList onEditSite={handleEditSite} onAddSite={handleAddManual} /> <SiteList onEditSite={handleEditSite} onAddSite={handleAddManual} />
{/* Quick frequency change */}
<div className="bg-white dark:bg-dark-surface border border-gray-200 dark:border-dark-border rounded-lg shadow-sm">
<BatchFrequencyChange />
</div>
{/* Coverage settings */} {/* Coverage settings */}
<div className="bg-white dark:bg-dark-surface border border-gray-200 dark:border-dark-border rounded-lg shadow-sm p-4 space-y-3"> <div className="bg-white dark:bg-dark-surface border border-gray-200 dark:border-dark-border rounded-lg shadow-sm p-4 space-y-3">
<h3 className="text-sm font-semibold text-gray-800 dark:text-dark-text"> <h3 className="text-sm font-semibold text-gray-800 dark:text-dark-text">
@@ -783,6 +879,24 @@ export default function App() {
unit="%" unit="%"
hint="Transparency of the RF coverage overlay" hint="Transparency of the RF coverage overlay"
/> />
<div>
<label className="text-sm font-medium text-gray-700 dark:text-dark-text">
Coverage Renderer
</label>
<p className="text-xs text-gray-400 dark:text-dark-muted mb-1">
Visualization style for coverage overlay
</p>
<select
value={coverageRenderer}
onChange={(e) => setCoverageRenderer(e.target.value as 'webgl-radial' | 'webgl-texture' | 'canvas')}
className="w-full mt-1 px-2 py-1.5 text-sm bg-white dark:bg-dark-border border border-gray-300 dark:border-dark-border rounded-md text-gray-700 dark:text-dark-text"
>
<option value="webgl-radial" className="bg-white dark:bg-slate-800 text-gray-700 dark:text-white">WebGL Radial (smooth)</option>
<option value="webgl-texture" className="bg-white dark:bg-slate-800 text-gray-700 dark:text-white">WebGL Texture (fast)</option>
<option value="canvas" className="bg-white dark:bg-slate-800 text-gray-700 dark:text-white">Canvas (fallback)</option>
</select>
</div>
{coverageRenderer === 'canvas' && (
<div> <div>
<label className="text-sm font-medium text-gray-700 dark:text-dark-text"> <label className="text-sm font-medium text-gray-700 dark:text-dark-text">
Heatmap Quality Heatmap Quality
@@ -812,6 +926,7 @@ export default function App() {
</p> </p>
)} )}
</div> </div>
)}
{/* Propagation Model Preset */} {/* Propagation Model Preset */}
<div> <div>
<label className="text-sm font-medium text-gray-700 dark:text-dark-text"> <label className="text-sm font-medium text-gray-700 dark:text-dark-text">
@@ -1088,15 +1203,15 @@ export default function App() {
<label className="flex items-center gap-2 cursor-pointer text-sm text-gray-700 dark:text-dark-text"> <label className="flex items-center gap-2 cursor-pointer text-sm text-gray-700 dark:text-dark-text">
<input <input
type="checkbox" type="checkbox"
checked={measurementMode} checked={activeTool === 'ruler'}
onChange={(e) => setMeasurementMode(e.target.checked)} onChange={(e) => e.target.checked ? setActiveTool('ruler') : clearTool()}
className="w-4 h-4 rounded border-gray-300 dark:border-dark-border accent-orange-600" className="w-4 h-4 rounded border-gray-300 dark:border-dark-border accent-orange-600"
/> />
Distance Measurement Distance Measurement
</label> </label>
{measurementMode && ( {activeTool === 'ruler' && (
<p className="text-xs text-gray-400 dark:text-dark-muted pl-6"> <p className="text-xs text-gray-400 dark:text-dark-muted pl-6">
Click to add points. Right-click to finish. Click start and end points. Esc to cancel.
</p> </p>
)} )}
<label className="flex items-center gap-2 cursor-pointer text-sm text-gray-700 dark:text-dark-text"> <label className="flex items-center gap-2 cursor-pointer text-sm text-gray-700 dark:text-dark-text">
@@ -1130,7 +1245,7 @@ export default function App() {
/> />
</div> </div>
)} )}
</div> </div>
</div> </div>
{/* Data Cache Status */} {/* Data Cache Status */}

View File

@@ -1,8 +1,8 @@
/** /**
* Renders a dashed polyline around the coverage zone boundary. * Renders a dashed polyline around the coverage zone boundary.
* *
* Uses @turf/concave to compute a concave hull (alpha shape) per site, * Prefers server-computed boundary if available (shapely concave_hull).
* which correctly follows sector/wedge shapes — not just convex circles. * Falls back to client-side @turf/concave computation.
* *
* Performance: ~20-50ms for 10k points (runs once per coverage change). * Performance: ~20-50ms for 10k points (runs once per coverage change).
*/ */
@@ -12,7 +12,7 @@ import { useMap } from 'react-leaflet';
import L from 'leaflet'; import L from 'leaflet';
import concave from '@turf/concave'; import concave from '@turf/concave';
import { featureCollection, point } from '@turf/helpers'; import { featureCollection, point } from '@turf/helpers';
import type { CoveragePoint } from '@/types/index.ts'; import type { CoveragePoint, BoundaryPoint } from '@/types/index.ts';
import { logger } from '@/utils/logger.ts'; import { logger } from '@/utils/logger.ts';
interface CoverageBoundaryProps { interface CoverageBoundaryProps {
@@ -21,6 +21,7 @@ interface CoverageBoundaryProps {
resolution: number; // meters — controls concave hull detail resolution: number; // meters — controls concave hull detail
color?: string; color?: string;
weight?: number; weight?: number;
boundary?: BoundaryPoint[]; // server-provided boundary (preferred)
} }
export default function CoverageBoundary({ export default function CoverageBoundary({
@@ -29,13 +30,25 @@ export default function CoverageBoundary({
resolution, resolution,
color = '#ffffff', // white — visible against red-to-blue gradient color = '#ffffff', // white — visible against red-to-blue gradient
weight = 2, weight = 2,
boundary,
}: CoverageBoundaryProps) { }: CoverageBoundaryProps) {
const map = useMap(); const map = useMap();
const layerRef = useRef<L.LayerGroup | null>(null); const layerRef = useRef<L.LayerGroup | null>(null);
// Compute boundary paths grouped by site // Compute boundary paths - prefer server boundary, fallback to client-side
const boundaryPaths = useMemo(() => { const boundaryPaths = useMemo(() => {
if (!visible || points.length === 0) return []; if (!visible) return [];
// Use server-provided boundary if available
if (boundary && boundary.length >= 3) {
const serverPath: L.LatLngExpression[] = boundary.map(
(p) => [p.lat, p.lon] as L.LatLngExpression
);
return [serverPath];
}
// Fallback to client-side computation
if (points.length === 0) return [];
// Group points by siteId (fallback to 'all' when siteId not available from API) // Group points by siteId (fallback to 'all' when siteId not available from API)
const bySite = new Map<string, CoveragePoint[]>(); const bySite = new Map<string, CoveragePoint[]>();
@@ -61,7 +74,7 @@ export default function CoverageBoundary({
} }
return paths; return paths;
}, [points, visible, resolution]); }, [points, visible, resolution, boundary]);
// Render / cleanup polylines // Render / cleanup polylines
useEffect(() => { useEffect(() => {
@@ -107,7 +120,10 @@ export default function CoverageBoundary({
/** /**
* Compute concave hull boundary path(s) for a set of coverage points. * Compute concave hull boundary path(s) for a set of coverage points.
* *
* maxEdge = resolution * 3 (in km) gives good detail without over-fitting. * Uses adaptive maxEdge based on point count and resolution:
* - More points → smaller maxEdge for finer detail
* - Larger resolution → larger maxEdge to avoid over-fitting
*
* Returns multiple paths if hull is a MultiPolygon (disjoint coverage areas). * Returns multiple paths if hull is a MultiPolygon (disjoint coverage areas).
* Falls back to empty if hull computation fails (e.g., collinear points). * Falls back to empty if hull computation fails (e.g., collinear points).
*/ */
@@ -121,8 +137,17 @@ function computeConcaveHulls(
const features = pts.map((p) => point([p.lon, p.lat])); const features = pts.map((p) => point([p.lon, p.lat]));
const fc = featureCollection(features); const fc = featureCollection(features);
// maxEdge in km — resolution * 3 balances detail vs smoothness // Adaptive maxEdge based on point density:
const maxEdge = (resolutionM * 3) / 1000; // - Base: resolution * 2 (tighter fit)
// - For sparse grids (<100 pts): use larger edge to avoid holes
// - For dense grids (>1000 pts): use smaller edge for detail
let multiplier = 2.0;
if (pts.length < 100) {
multiplier = 4.0; // Sparse: wider tolerance
} else if (pts.length > 1000) {
multiplier = 1.5; // Dense: finer detail
}
const maxEdge = (resolutionM * multiplier) / 1000;
try { try {
const hull = concave(fc, { maxEdge, units: 'kilometers' }); const hull = concave(fc, { maxEdge, units: 'kilometers' });

View File

@@ -45,6 +45,12 @@ export default function ElevationLayer({ visible, opacity }: ElevationLayerProps
const debounceRef = useRef<ReturnType<typeof setTimeout> | null>(null); const debounceRef = useRef<ReturnType<typeof setTimeout> | null>(null);
const abortRef = useRef<AbortController | null>(null); const abortRef = useRef<AbortController | null>(null);
const lastBoundsRef = useRef<string>(''); const lastBoundsRef = useRef<string>('');
const opacityRef = useRef(opacity);
// Keep opacity ref in sync
useEffect(() => {
opacityRef.current = opacity;
}, [opacity]);
const removeOverlay = useCallback(() => { const removeOverlay = useCallback(() => {
if (overlayRef.current) { if (overlayRef.current) {
@@ -119,21 +125,23 @@ export default function ElevationLayer({ visible, opacity }: ElevationLayerProps
// Remove old overlay // Remove old overlay
removeOverlay(); removeOverlay();
// Add new overlay // Add new overlay (opacity will be set by the dedicated effect)
const leafletBounds = L.latLngBounds( const leafletBounds = L.latLngBounds(
[data.bbox.min_lat, data.bbox.min_lon], [data.bbox.min_lat, data.bbox.min_lon],
[data.bbox.max_lat, data.bbox.max_lon], [data.bbox.max_lat, data.bbox.max_lon],
); );
overlayRef.current = L.imageOverlay(canvas.toDataURL(), leafletBounds, { overlayRef.current = L.imageOverlay(canvas.toDataURL(), leafletBounds, {
opacity, opacity: 0.5, // Default, will be updated by opacity effect
interactive: false, interactive: false,
zIndex: 97, zIndex: 97,
}); });
overlayRef.current.addTo(map); overlayRef.current.addTo(map);
// Apply current opacity immediately using ref
overlayRef.current.setOpacity(opacityRef.current);
} catch (_e) { } catch (_e) {
// Silently ignore fetch errors (network issues, aborts, etc.) // Silently ignore fetch errors (network issues, aborts, etc.)
} }
}, [map, opacity, removeOverlay]); }, [map, removeOverlay]);
// Update opacity on existing overlay // Update opacity on existing overlay
useEffect(() => { useEffect(() => {

View File

@@ -0,0 +1,83 @@
/**
 * Link Budget Overlay
 *
 * Shows RX marker and dashed line from TX site to RX point.
 * The RX marker is draggable when an `onRxDrag` callback is provided;
 * positions are reported continuously while dragging and once on drop.
 */
import { useEffect, useState } from 'react';
import { Marker, Polyline } from 'react-leaflet';
import L from 'leaflet';

interface LinkBudgetOverlayProps {
  txPoint: { lat: number; lon: number } | null;  // TX site; line hidden when null
  rxPoint: { lat: number; lon: number } | null;  // RX point; overlay hidden when null
  onRxDrag?: (lat: number, lon: number) => void; // called with new RX position during/after drag
}

// Orange circle icon for RX marker
const rxIcon = L.divIcon({
  className: 'rx-marker',
  html: '<div style="width: 14px; height: 14px; background: #f97316; border: 2px solid white; border-radius: 50%; box-shadow: 0 2px 4px rgba(0,0,0,0.3);"></div>',
  iconSize: [14, 14],
  iconAnchor: [7, 7],
});

export default function LinkBudgetOverlay({ txPoint, rxPoint, onRxDrag }: LinkBudgetOverlayProps) {
  const [markerRef, setMarkerRef] = useState<L.Marker | null>(null);

  // Subscribe to drag events on the underlying Leaflet marker instance.
  // Both continuous 'drag' and final 'dragend' are handled HERE only —
  // previously 'dragend' was also wired via the JSX `eventHandlers` prop,
  // which made `onRxDrag` fire twice per drop. Single subscription fixes that.
  useEffect(() => {
    if (!markerRef || !onRxDrag) return;

    const handleDrag = () => {
      const pos = markerRef.getLatLng();
      onRxDrag(pos.lat, pos.lng);
    };

    markerRef.on('drag', handleDrag);
    markerRef.on('dragend', handleDrag);

    return () => {
      markerRef.off('drag', handleDrag);
      markerRef.off('dragend', handleDrag);
    };
  }, [markerRef, onRxDrag]);

  if (!rxPoint) return null;

  const rxLatLng: [number, number] = [rxPoint.lat, rxPoint.lon];
  const txLatLng: [number, number] | null = txPoint ? [txPoint.lat, txPoint.lon] : null;

  return (
    <>
      {/* Dashed line from TX to RX */}
      {txLatLng && (
        <Polyline
          positions={[txLatLng, rxLatLng]}
          pathOptions={{
            color: '#f97316',
            weight: 2,
            dashArray: '8, 4',
            opacity: 0.8,
          }}
        />
      )}

      {/* RX marker (draggable only when a drag callback is provided) */}
      <Marker
        position={rxLatLng}
        icon={rxIcon}
        draggable={!!onRxDrag}
        ref={(ref) => setMarkerRef(ref)}
      />
    </>
  );
}

View File

@@ -1,10 +1,12 @@
import { useRef, useCallback, useEffect } from 'react'; import { useRef, useCallback, useEffect, useState } from 'react';
import { MapContainer, TileLayer, useMapEvents, useMap } from 'react-leaflet'; import { MapContainer, TileLayer, useMapEvents, useMap } from 'react-leaflet';
import 'leaflet/dist/leaflet.css'; import 'leaflet/dist/leaflet.css';
import type { Map as LeafletMap } from 'leaflet'; import type { Map as LeafletMap } from 'leaflet';
import L from 'leaflet';
import type { Site } from '@/types/index.ts'; import type { Site } from '@/types/index.ts';
import { useSitesStore } from '@/store/sites.ts'; import { useSitesStore } from '@/store/sites.ts';
import { useSettingsStore } from '@/store/settings.ts'; import { useSettingsStore } from '@/store/settings.ts';
import { useToolStore } from '@/store/tools.ts';
import { useToastStore } from '@/components/ui/Toast.tsx'; import { useToastStore } from '@/components/ui/Toast.tsx';
import SiteMarker from './SiteMarker.tsx'; import SiteMarker from './SiteMarker.tsx';
import MapExtras from './MapExtras.tsx'; import MapExtras from './MapExtras.tsx';
@@ -14,23 +16,72 @@ import ElevationDisplay from './ElevationDisplay.tsx';
import ElevationLayer from './ElevationLayer.tsx'; import ElevationLayer from './ElevationLayer.tsx';
interface MapViewProps { interface MapViewProps {
onMapClick: (lat: number, lon: number) => void; onSitePlacement: (lat: number, lon: number) => void;
onRxPlacement?: (lat: number, lon: number) => void;
onEditSite: (site: Site) => void; onEditSite: (site: Site) => void;
onProfileRequest?: (start: [number, number], end: [number, number]) => void; onProfileRequest?: (start: [number, number], end: [number, number]) => void;
showLinkBudget?: boolean;
onToggleLinkBudget?: () => void;
children?: React.ReactNode; children?: React.ReactNode;
} }
const SNAP_THRESHOLD_PX = 20;
/**
* Unified map click handler that dispatches based on active tool
*/
function MapClickHandler({ function MapClickHandler({
onMapClick, onSitePlacement,
onRxPlacement,
onRulerClick,
sites,
}: { }: {
onMapClick: (lat: number, lon: number) => void; onSitePlacement: (lat: number, lon: number) => void;
onRxPlacement?: (lat: number, lon: number) => void;
onRulerClick: (lat: number, lon: number) => void;
sites: Site[];
}) { }) {
const isPlacingMode = useSitesStore((s) => s.isPlacingMode); const activeTool = useToolStore((s) => s.activeTool);
const clearTool = useToolStore((s) => s.clearTool);
const map = useMap();
useMapEvents({ useMapEvents({
click: (e) => { click: (e) => {
if (isPlacingMode) { switch (activeTool) {
onMapClick(e.latlng.lat, e.latlng.lng); case 'ruler':
// Snap to nearest site if within threshold
const clickPoint = map.latLngToContainerPoint(e.latlng);
let snappedLat = e.latlng.lat;
let snappedLon = e.latlng.lng;
for (const site of sites) {
const sitePoint = map.latLngToContainerPoint(L.latLng(site.lat, site.lon));
const pixelDist = clickPoint.distanceTo(sitePoint);
if (pixelDist < SNAP_THRESHOLD_PX) {
snappedLat = site.lat;
snappedLon = site.lon;
break;
}
}
onRulerClick(snappedLat, snappedLon);
break;
case 'rx-placement':
if (onRxPlacement) {
onRxPlacement(e.latlng.lat, e.latlng.lng);
clearTool(); // Single click action
}
break;
case 'site-placement':
onSitePlacement(e.latlng.lat, e.latlng.lng);
clearTool(); // Single click action
break;
case 'none':
default:
// No action on map click — just pan/zoom
break;
} }
}, },
}); });
@@ -38,6 +89,61 @@ function MapClickHandler({
return null; return null;
} }
/**
* Component to apply cursor classes based on active tool
*/
function CursorManager() {
const map = useMap();
const activeTool = useToolStore((s) => s.activeTool);
useEffect(() => {
const container = map.getContainer();
// Remove all tool cursors
container.classList.remove('tool-ruler', 'tool-rx-placement', 'tool-site-placement');
switch (activeTool) {
case 'ruler':
container.classList.add('tool-ruler');
break;
case 'rx-placement':
container.classList.add('tool-rx-placement');
break;
case 'site-placement':
container.classList.add('tool-site-placement');
break;
default:
// Default cursor (arrow)
break;
}
}, [map, activeTool]);
return null;
}
/**
* Right-click handler for ruler mode
*/
function RulerRightClickHandler({ onRightClick }: { onRightClick: () => void }) {
const activeTool = useToolStore((s) => s.activeTool);
const map = useMap();
useEffect(() => {
if (activeTool !== 'ruler') return;
const handleContextMenu = (e: L.LeafletMouseEvent) => {
L.DomEvent.preventDefault(e.originalEvent);
onRightClick();
};
map.on('contextmenu', handleContextMenu);
return () => {
map.off('contextmenu', handleContextMenu);
};
}, [map, activeTool, onRightClick]);
return null;
}
/** /**
* Inner component that exposes the map instance via ref callback * Inner component that exposes the map instance via ref callback
*/ */
@@ -49,23 +155,72 @@ function MapRefSetter({ mapRef }: { mapRef: React.MutableRefObject<LeafletMap |
return null; return null;
} }
export default function MapView({ onMapClick, onEditSite, onProfileRequest, children }: MapViewProps) { export default function MapView({ onSitePlacement, onRxPlacement, onEditSite, onProfileRequest, showLinkBudget, onToggleLinkBudget, children }: MapViewProps) {
const sites = useSitesStore((s) => s.sites); const sites = useSitesStore((s) => s.sites);
const isPlacingMode = useSitesStore((s) => s.isPlacingMode);
const showTerrain = useSettingsStore((s) => s.showTerrain); const showTerrain = useSettingsStore((s) => s.showTerrain);
const terrainOpacity = useSettingsStore((s) => s.terrainOpacity); const terrainOpacity = useSettingsStore((s) => s.terrainOpacity);
const setShowTerrain = useSettingsStore((s) => s.setShowTerrain); const setShowTerrain = useSettingsStore((s) => s.setShowTerrain);
const showGrid = useSettingsStore((s) => s.showGrid); const showGrid = useSettingsStore((s) => s.showGrid);
const setShowGrid = useSettingsStore((s) => s.setShowGrid); const setShowGrid = useSettingsStore((s) => s.setShowGrid);
const measurementMode = useSettingsStore((s) => s.measurementMode);
const setMeasurementMode = useSettingsStore((s) => s.setMeasurementMode);
const showElevationInfo = useSettingsStore((s) => s.showElevationInfo); const showElevationInfo = useSettingsStore((s) => s.showElevationInfo);
const showElevationOverlay = useSettingsStore((s) => s.showElevationOverlay); const showElevationOverlay = useSettingsStore((s) => s.showElevationOverlay);
const setShowElevationOverlay = useSettingsStore((s) => s.setShowElevationOverlay); const setShowElevationOverlay = useSettingsStore((s) => s.setShowElevationOverlay);
const elevationOpacity = useSettingsStore((s) => s.elevationOpacity); const elevationOpacity = useSettingsStore((s) => s.elevationOpacity);
const addToast = useToastStore((s) => s.addToast); const addToast = useToastStore((s) => s.addToast);
// Tool store
const activeTool = useToolStore((s) => s.activeTool);
const setActiveTool = useToolStore((s) => s.setActiveTool);
const clearTool = useToolStore((s) => s.clearTool);
const mapRef = useRef<LeafletMap | null>(null); const mapRef = useRef<LeafletMap | null>(null);
// Ruler points state (managed here since MeasurementTool is now controlled by tool store)
const [rulerPoints, setRulerPoints] = useState<[number, number][]>([]);
// Ruler limited to exactly 2 points (point-to-point measurement)
const handleRulerClick = useCallback((lat: number, lon: number) => {
setRulerPoints(prev => {
if (prev.length === 0) {
// First point
return [[lat, lon]];
} else if (prev.length === 1) {
// Second point — measurement complete
return [prev[0], [lat, lon]];
} else {
// Already 2 points — start new measurement
return [[lat, lon]];
}
});
}, []);
const handleRulerRightClick = useCallback(() => {
if (rulerPoints.length >= 2) {
// Calculate total distance
let total = 0;
for (let i = 1; i < rulerPoints.length; i++) {
const [lat1, lon1] = rulerPoints[i - 1];
const [lat2, lon2] = rulerPoints[i];
const R = 6371;
const dLat = ((lat2 - lat1) * Math.PI) / 180;
const dLon = ((lon2 - lon1) * Math.PI) / 180;
const a = Math.sin(dLat / 2) ** 2 +
Math.cos((lat1 * Math.PI) / 180) * Math.cos((lat2 * Math.PI) / 180) * Math.sin(dLon / 2) ** 2;
total += R * 2 * Math.atan2(Math.sqrt(a), Math.sqrt(1 - a));
}
addToast(`Distance: ${total.toFixed(2)} km (${(total * 1000).toFixed(0)} m)`, 'info');
}
setRulerPoints([]);
clearTool();
}, [rulerPoints, addToast, clearTool]);
// Clear ruler points when tool changes away from ruler
useEffect(() => {
if (activeTool !== 'ruler') {
setRulerPoints([]);
}
}, [activeTool]);
const handleFitToSites = useCallback(() => { const handleFitToSites = useCallback(() => {
if (sites.length === 0 || !mapRef.current) return; if (sites.length === 0 || !mapRef.current) return;
const bounds = sites.map((site) => [site.lat, site.lon] as [number, number]); const bounds = sites.map((site) => [site.lat, site.lon] as [number, number]);
@@ -76,14 +231,24 @@ export default function MapView({ onMapClick, onEditSite, onProfileRequest, chil
mapRef.current?.setView([48.4, 35.0], 7); mapRef.current?.setView([48.4, 35.0], 7);
}, []); }, []);
// Toggle ruler tool
const handleRulerToggle = useCallback(() => {
if (activeTool === 'ruler') {
clearTool();
} else {
setActiveTool('ruler');
}
}, [activeTool, setActiveTool, clearTool]);
return ( return (
<> <>
<MapContainer <MapContainer
center={[48.4, 35.0]} center={[48.4, 35.0]}
zoom={7} zoom={7}
className={`w-full h-full ${isPlacingMode ? 'cursor-crosshair' : ''}`} className="w-full h-full"
> >
<MapRefSetter mapRef={mapRef} /> <MapRefSetter mapRef={mapRef} />
<CursorManager />
{/* Base OSM layer */} {/* Base OSM layer */}
<TileLayer <TileLayer
attribution='&copy; <a href="https://www.openstreetmap.org/copyright">OpenStreetMap</a>' attribution='&copy; <a href="https://www.openstreetmap.org/copyright">OpenStreetMap</a>'
@@ -100,16 +265,21 @@ export default function MapView({ onMapClick, onEditSite, onProfileRequest, chil
)} )}
{/* Elevation color overlay from SRTM terrain data */} {/* Elevation color overlay from SRTM terrain data */}
<ElevationLayer visible={showElevationOverlay} opacity={elevationOpacity} /> <ElevationLayer visible={showElevationOverlay} opacity={elevationOpacity} />
<MapClickHandler onMapClick={onMapClick} /> {/* Unified click handler */}
<MapClickHandler
onSitePlacement={onSitePlacement}
onRxPlacement={onRxPlacement}
onRulerClick={handleRulerClick}
sites={sites}
/>
{/* Right-click handler for ruler */}
<RulerRightClickHandler onRightClick={handleRulerRightClick} />
<MapExtras /> <MapExtras />
{showElevationInfo && <ElevationDisplay />} {showElevationInfo && <ElevationDisplay />}
<CoordinateGrid visible={showGrid} /> <CoordinateGrid visible={showGrid} />
{/* Ruler visualization (only points and line, no click handling) */}
<MeasurementTool <MeasurementTool
enabled={measurementMode} points={rulerPoints}
onComplete={(distKm) => {
addToast(`Distance: ${distKm.toFixed(2)} km (${(distKm * 1000).toFixed(0)} m)`, 'info');
setMeasurementMode(false);
}}
onProfileRequest={onProfileRequest} onProfileRequest={onProfileRequest}
/> />
{sites {sites
@@ -163,12 +333,12 @@ export default function MapView({ onMapClick, onEditSite, onProfileRequest, chil
Grid Grid
</button> </button>
<button <button
onClick={() => setMeasurementMode(!measurementMode)} onClick={handleRulerToggle}
className={`bg-white dark:bg-dark-surface shadow-lg rounded px-3 py-2 text-sm className={`bg-white dark:bg-dark-surface shadow-lg rounded px-3 py-2 text-sm
hover:bg-gray-50 dark:hover:bg-dark-border transition-colors hover:bg-gray-50 dark:hover:bg-dark-border transition-colors
text-gray-700 dark:text-dark-text min-h-[36px] text-gray-700 dark:text-dark-text min-h-[36px]
${measurementMode ? 'ring-2 ring-orange-500' : ''}`} ${activeTool === 'ruler' ? 'ring-2 ring-orange-500' : ''}`}
title={measurementMode ? 'Exit measurement mode' : 'Measure distance (click points, right-click to finish)'} title={activeTool === 'ruler' ? 'Exit measurement mode' : 'Measure point-to-point distance'}
> >
Ruler Ruler
</button> </button>
@@ -182,6 +352,18 @@ export default function MapView({ onMapClick, onEditSite, onProfileRequest, chil
> >
Elev Elev
</button> </button>
{onToggleLinkBudget && (
<button
onClick={onToggleLinkBudget}
className={`bg-white dark:bg-dark-surface shadow-lg rounded px-3 py-2 text-sm
hover:bg-gray-50 dark:hover:bg-dark-border transition-colors
text-gray-700 dark:text-dark-text min-h-[36px]
${showLinkBudget ? 'ring-2 ring-purple-500' : ''}`}
title={showLinkBudget ? 'Close Link Budget Calculator' : 'Open Link Budget Calculator'}
>
LB
</button>
)}
</div> </div>
</> </>
); );

View File

@@ -1,10 +1,16 @@
import { useEffect, useRef, useState } from 'react'; /**
import { useMap, Polyline, Marker } from 'react-leaflet'; * Ruler/Measurement Tool Visualization
*
* Pure visualization component - receives points from parent,
* click handling is done by the centralized MapClickHandler.
*/
import { useEffect, useRef } from 'react';
import { Polyline, Marker } from 'react-leaflet';
import L from 'leaflet'; import L from 'leaflet';
interface MeasurementToolProps { interface MeasurementToolProps {
enabled: boolean; points: [number, number][];
onComplete?: (distanceKm: number) => void;
onProfileRequest?: (start: [number, number], end: [number, number]) => void; onProfileRequest?: (start: [number, number], end: [number, number]) => void;
} }
@@ -40,50 +46,18 @@ const dotIcon = L.divIcon({
html: '<div style="width:10px;height:10px;background:white;border:2px solid #333;border-radius:50%;"></div>', html: '<div style="width:10px;height:10px;background:white;border:2px solid #333;border-radius:50%;"></div>',
}); });
export default function MeasurementTool({ enabled, onComplete, onProfileRequest }: MeasurementToolProps) { export default function MeasurementTool({ points, onProfileRequest }: MeasurementToolProps) {
const map = useMap(); const overlayRef = useRef<HTMLDivElement>(null);
const [points, setPoints] = useState<[number, number][]>([]);
const pointsRef = useRef(points);
useEffect(() => {
pointsRef.current = points;
}, [points]);
// Clear on disable // Use Leaflet's DOM event utility to block click propagation to the map
/* eslint-disable react-hooks/set-state-in-effect */
useEffect(() => { useEffect(() => {
if (!enabled) { if (overlayRef.current) {
setPoints([]); L.DomEvent.disableClickPropagation(overlayRef.current);
L.DomEvent.disableScrollPropagation(overlayRef.current);
} }
}, [enabled]); }, [points.length]); // Re-run when overlay appears/disappears
/* eslint-enable react-hooks/set-state-in-effect */
// Click handler: add measurement point if (points.length === 0) return null;
useEffect(() => {
if (!enabled) return;
const handleClick = (e: L.LeafletMouseEvent) => {
setPoints((prev) => [...prev, [e.latlng.lat, e.latlng.lng]]);
};
const handleRightClick = (e: L.LeafletMouseEvent) => {
L.DomEvent.preventDefault(e.originalEvent);
const pts = pointsRef.current;
if (pts.length >= 2 && onComplete) {
onComplete(totalDistance(pts));
}
setPoints([]);
};
map.on('click', handleClick);
map.on('contextmenu', handleRightClick);
return () => {
map.off('click', handleClick);
map.off('contextmenu', handleRightClick);
};
}, [map, enabled, onComplete]);
if (!enabled || points.length === 0) return null;
const dist = totalDistance(points); const dist = totalDistance(points);
@@ -100,6 +74,7 @@ export default function MeasurementTool({ enabled, onComplete, onProfileRequest
))} ))}
{dist > 0 && ( {dist > 0 && (
<div <div
ref={overlayRef}
style={{ style={{
position: 'absolute', position: 'absolute',
top: '10px', top: '10px',
@@ -110,7 +85,6 @@ export default function MeasurementTool({ enabled, onComplete, onProfileRequest
padding: '6px 14px', padding: '6px 14px',
borderRadius: '6px', borderRadius: '6px',
zIndex: 2000, zIndex: 2000,
pointerEvents: 'none',
fontSize: '13px', fontSize: '13px',
fontWeight: 600, fontWeight: 600,
letterSpacing: '0.3px', letterSpacing: '0.3px',
@@ -119,10 +93,7 @@ export default function MeasurementTool({ enabled, onComplete, onProfileRequest
Distance: {dist.toFixed(2)} km ({(dist * 1000).toFixed(0)} m) Distance: {dist.toFixed(2)} km ({(dist * 1000).toFixed(0)} m)
{points.length >= 2 && onProfileRequest && ( {points.length >= 2 && onProfileRequest && (
<button <button
onClick={(e) => { onClick={() => onProfileRequest(points[0], points[points.length - 1])}
e.stopPropagation();
onProfileRequest(points[0], points[points.length - 1]);
}}
style={{ style={{
marginLeft: 10, marginLeft: 10,
background: 'rgba(255,255,255,0.15)', background: 'rgba(255,255,255,0.15)',
@@ -132,7 +103,6 @@ export default function MeasurementTool({ enabled, onComplete, onProfileRequest
borderRadius: 4, borderRadius: 4,
cursor: 'pointer', cursor: 'pointer',
fontSize: 11, fontSize: 11,
pointerEvents: 'auto',
}} }}
> >
Terrain Profile Terrain Profile

View File

@@ -1,51 +1,77 @@
/** /**
* Canvas-based terrain elevation profile viewer. * Canvas-based terrain elevation profile viewer with Fresnel zone visualization.
* *
* Shows elevation cross-section between two geographic points with: * Shows elevation cross-section between two geographic points with:
* - Green filled terrain area * - Green filled terrain area
* - Dashed red LOS line from start to end * - Dashed red LOS line from start to end
* - Optional Fresnel zone ellipse (light blue)
* - Red highlighting where terrain intrudes Fresnel zone
* - Hover tooltip with elevation/distance at cursor * - Hover tooltip with elevation/distance at cursor
* - Stats bar: total distance, min/max elevation * - Stats bar: total distance, min/max elevation, Fresnel status
*/ */
import { useEffect, useRef, useState, useCallback } from 'react'; import { useEffect, useRef, useState, useCallback } from 'react';
import L from 'leaflet';
import { api } from '@/services/api.ts'; import { api } from '@/services/api.ts';
import type { TerrainProfilePoint } from '@/services/api.ts'; import type { FresnelProfileResponse } from '@/services/api.ts';
interface TerrainProfileProps { interface TerrainProfileProps {
start: [number, number]; // [lat, lon] start: [number, number]; // [lat, lon]
end: [number, number]; // [lat, lon] end: [number, number]; // [lat, lon]
txHeight?: number; // TX antenna height (m)
rxHeight?: number; // RX antenna height (m)
frequency?: number; // Frequency (MHz) for Fresnel calculation
onClose: () => void; onClose: () => void;
} }
const CANVAS_W = 600; const CANVAS_W = 600;
const CANVAS_H = 200; const CANVAS_H = 220;
const PAD = { top: 20, right: 20, bottom: 30, left: 50 }; const PAD = { top: 20, right: 20, bottom: 30, left: 50 };
const PLOT_W = CANVAS_W - PAD.left - PAD.right; const PLOT_W = CANVAS_W - PAD.left - PAD.right;
const PLOT_H = CANVAS_H - PAD.top - PAD.bottom; const PLOT_H = CANVAS_H - PAD.top - PAD.bottom;
export default function TerrainProfile({ start, end, onClose }: TerrainProfileProps) { export default function TerrainProfile({
start,
end,
txHeight = 30,
rxHeight = 1.5,
frequency = 1800,
onClose,
}: TerrainProfileProps) {
const canvasRef = useRef<HTMLCanvasElement>(null); const canvasRef = useRef<HTMLCanvasElement>(null);
const [profile, setProfile] = useState<TerrainProfilePoint[] | null>(null); const [fresnelData, setFresnelData] = useState<FresnelProfileResponse | null>(null);
const [loading, setLoading] = useState(true); const [loading, setLoading] = useState(true);
const [error, setError] = useState<string | null>(null); const [error, setError] = useState<string | null>(null);
const [hover, setHover] = useState<{ x: number; idx: number } | null>(null); const [hover, setHover] = useState<{ x: number; idx: number } | null>(null);
const [showFresnel, setShowFresnel] = useState(true);
// Fetch profile data // Fetch Fresnel profile data (includes terrain)
useEffect(() => { useEffect(() => {
setLoading(true); setLoading(true);
setError(null); setError(null);
api api
.getTerrainProfile(start[0], start[1], end[0], end[1], 200) .getFresnelProfile({
.then((data) => { tx_lat: start[0],
setProfile(data); tx_lon: start[1],
tx_height_m: txHeight,
rx_lat: end[0],
rx_lon: end[1],
rx_height_m: rxHeight,
frequency_mhz: frequency,
num_points: 200,
})
.then((data: FresnelProfileResponse) => {
setFresnelData(data);
setLoading(false); setLoading(false);
}) })
.catch((err) => { .catch((err: Error) => {
setError(err.message); setError(err.message);
setLoading(false); setLoading(false);
}); });
}, [start, end]); }, [start, end, txHeight, rxHeight, frequency]);
const profile = fresnelData?.profile;
// Draw chart // Draw chart
const draw = useCallback( const draw = useCallback(
@@ -64,16 +90,24 @@ export default function TerrainProfile({ start, end, onClose }: TerrainProfilePr
// Clear // Clear
ctx.clearRect(0, 0, CANVAS_W, CANVAS_H); ctx.clearRect(0, 0, CANVAS_W, CANVAS_H);
const elevations = profile.map((p) => p.elevation); const terrainElevs = profile.map((p) => p.terrain_elevation);
const losHeights = profile.map((p) => p.los_height);
const fresnelTops = profile.map((p) => p.fresnel_top);
const fresnelBottoms = profile.map((p) => p.fresnel_bottom);
const distances = profile.map((p) => p.distance); const distances = profile.map((p) => p.distance);
const minElev = Math.min(...elevations);
const maxElev = Math.max(...elevations); // Calculate bounds including Fresnel zone
const allHeights = showFresnel
? [...terrainElevs, ...fresnelTops, ...fresnelBottoms]
: [...terrainElevs, ...losHeights];
const minElev = Math.min(...allHeights);
const maxElev = Math.max(...allHeights);
const maxDist = distances[distances.length - 1] || 1; const maxDist = distances[distances.length - 1] || 1;
// Add 10% padding to elevation range // Add 10% padding to elevation range
const elevRange = maxElev - minElev || 1; const elevRange = maxElev - minElev || 1;
const eMin = minElev - elevRange * 0.1; const eMin = minElev - elevRange * 0.1;
const eMax = maxElev + elevRange * 0.1; const eMax = maxElev + elevRange * 0.15;
const xScale = (d: number) => PAD.left + (d / maxDist) * PLOT_W; const xScale = (d: number) => PAD.left + (d / maxDist) * PLOT_W;
const yScale = (e: number) => PAD.top + PLOT_H - ((e - eMin) / (eMax - eMin)) * PLOT_H; const yScale = (e: number) => PAD.top + PLOT_H - ((e - eMin) / (eMax - eMin)) * PLOT_H;
@@ -90,11 +124,48 @@ export default function TerrainProfile({ start, end, onClose }: TerrainProfilePr
ctx.stroke(); ctx.stroke();
} }
// Fresnel zone fill (light blue)
if (showFresnel) {
ctx.beginPath();
// Top boundary (left to right)
ctx.moveTo(xScale(distances[0]), yScale(fresnelTops[0]));
for (let i = 1; i < profile.length; i++) {
ctx.lineTo(xScale(distances[i]), yScale(fresnelTops[i]));
}
// Bottom boundary (right to left)
for (let i = profile.length - 1; i >= 0; i--) {
ctx.lineTo(xScale(distances[i]), yScale(fresnelBottoms[i]));
}
ctx.closePath();
ctx.fillStyle = 'rgba(59, 130, 246, 0.15)';
ctx.fill();
// Fresnel boundaries (dashed)
ctx.setLineDash([3, 3]);
ctx.strokeStyle = 'rgba(59, 130, 246, 0.4)';
ctx.lineWidth = 1;
ctx.beginPath();
ctx.moveTo(xScale(distances[0]), yScale(fresnelTops[0]));
for (let i = 1; i < profile.length; i++) {
ctx.lineTo(xScale(distances[i]), yScale(fresnelTops[i]));
}
ctx.stroke();
ctx.beginPath();
ctx.moveTo(xScale(distances[0]), yScale(fresnelBottoms[0]));
for (let i = 1; i < profile.length; i++) {
ctx.lineTo(xScale(distances[i]), yScale(fresnelBottoms[i]));
}
ctx.stroke();
ctx.setLineDash([]);
}
// Terrain fill // Terrain fill
ctx.beginPath(); ctx.beginPath();
ctx.moveTo(xScale(distances[0]), yScale(elevations[0])); ctx.moveTo(xScale(distances[0]), yScale(terrainElevs[0]));
for (let i = 1; i < profile.length; i++) { for (let i = 1; i < profile.length; i++) {
ctx.lineTo(xScale(distances[i]), yScale(elevations[i])); ctx.lineTo(xScale(distances[i]), yScale(terrainElevs[i]));
} }
ctx.lineTo(xScale(distances[distances.length - 1]), PAD.top + PLOT_H); ctx.lineTo(xScale(distances[distances.length - 1]), PAD.top + PLOT_H);
ctx.lineTo(xScale(distances[0]), PAD.top + PLOT_H); ctx.lineTo(xScale(distances[0]), PAD.top + PLOT_H);
@@ -102,25 +173,39 @@ export default function TerrainProfile({ start, end, onClose }: TerrainProfilePr
ctx.fillStyle = 'rgba(34, 197, 94, 0.3)'; ctx.fillStyle = 'rgba(34, 197, 94, 0.3)';
ctx.fill(); ctx.fill();
// Highlight Fresnel intrusions (red fill)
if (showFresnel) {
for (let i = 0; i < profile.length; i++) {
if (profile[i].clearance < 0) {
const x = xScale(distances[i]);
const yTerrain = yScale(terrainElevs[i]);
const yFresnel = yScale(fresnelBottoms[i]);
const intrusion = Math.min(yFresnel - yTerrain, 20);
if (intrusion > 0) {
ctx.fillStyle = 'rgba(239, 68, 68, 0.4)';
ctx.fillRect(x - 1, yTerrain, 3, intrusion);
}
}
}
}
// Terrain line // Terrain line
ctx.beginPath(); ctx.beginPath();
ctx.moveTo(xScale(distances[0]), yScale(elevations[0])); ctx.moveTo(xScale(distances[0]), yScale(terrainElevs[0]));
for (let i = 1; i < profile.length; i++) { for (let i = 1; i < profile.length; i++) {
ctx.lineTo(xScale(distances[i]), yScale(elevations[i])); ctx.lineTo(xScale(distances[i]), yScale(terrainElevs[i]));
} }
ctx.strokeStyle = '#16a34a'; ctx.strokeStyle = '#16a34a';
ctx.lineWidth = 1.5; ctx.lineWidth = 1.5;
ctx.stroke(); ctx.stroke();
// LOS dashed line (start elevation to end elevation) // LOS line (solid)
ctx.beginPath(); ctx.beginPath();
ctx.setLineDash([6, 4]); ctx.moveTo(xScale(distances[0]), yScale(losHeights[0]));
ctx.moveTo(xScale(distances[0]), yScale(elevations[0])); ctx.lineTo(xScale(distances[distances.length - 1]), yScale(losHeights[losHeights.length - 1]));
ctx.lineTo(xScale(distances[distances.length - 1]), yScale(elevations[elevations.length - 1]));
ctx.strokeStyle = '#ef4444'; ctx.strokeStyle = '#ef4444';
ctx.lineWidth = 1.5; ctx.lineWidth = 1.5;
ctx.stroke(); ctx.stroke();
ctx.setLineDash([]);
// Y axis labels // Y axis labels
ctx.fillStyle = '#6b7280'; ctx.fillStyle = '#6b7280';
@@ -147,7 +232,7 @@ export default function TerrainProfile({ start, end, onClose }: TerrainProfilePr
if (hoverIdx !== null && hoverIdx >= 0 && hoverIdx < profile.length) { if (hoverIdx !== null && hoverIdx >= 0 && hoverIdx < profile.length) {
const p = profile[hoverIdx]; const p = profile[hoverIdx];
const hx = xScale(p.distance); const hx = xScale(p.distance);
const hy = yScale(p.elevation); const hy = yScale(p.terrain_elevation);
// Vertical line // Vertical line
ctx.beginPath(); ctx.beginPath();
@@ -157,14 +242,15 @@ export default function TerrainProfile({ start, end, onClose }: TerrainProfilePr
ctx.lineWidth = 1; ctx.lineWidth = 1;
ctx.stroke(); ctx.stroke();
// Dot // Dot on terrain
ctx.beginPath(); ctx.beginPath();
ctx.arc(hx, hy, 4, 0, Math.PI * 2); ctx.arc(hx, hy, 4, 0, Math.PI * 2);
ctx.fillStyle = '#2563eb'; ctx.fillStyle = '#2563eb';
ctx.fill(); ctx.fill();
// Tooltip // Tooltip with clearance info
const text = `${Math.round(p.elevation)}m @ ${(p.distance / 1000).toFixed(2)}km`; const clearanceText = showFresnel ? ` | F1: ${p.clearance >= 0 ? '+' : ''}${p.clearance.toFixed(0)}m` : '';
const text = `${Math.round(p.terrain_elevation)}m @ ${(p.distance / 1000).toFixed(2)}km${clearanceText}`;
ctx.font = 'bold 11px monospace'; ctx.font = 'bold 11px monospace';
const tw = ctx.measureText(text).width + 10; const tw = ctx.measureText(text).width + 10;
const tx = Math.min(hx + 8, CANVAS_W - tw - 4); const tx = Math.min(hx + 8, CANVAS_W - tw - 4);
@@ -173,13 +259,13 @@ export default function TerrainProfile({ start, end, onClose }: TerrainProfilePr
ctx.beginPath(); ctx.beginPath();
ctx.roundRect(tx, ty, tw, 18, 3); ctx.roundRect(tx, ty, tw, 18, 3);
ctx.fill(); ctx.fill();
ctx.fillStyle = 'white'; ctx.fillStyle = p.clearance < 0 && showFresnel ? '#fca5a5' : 'white';
ctx.textAlign = 'left'; ctx.textAlign = 'left';
ctx.textBaseline = 'middle'; ctx.textBaseline = 'middle';
ctx.fillText(text, tx + 5, ty + 9); ctx.fillText(text, tx + 5, ty + 9);
} }
}, },
[profile] [profile, showFresnel]
); );
// Re-draw on profile load or hover change // Re-draw on profile load or hover change
@@ -210,12 +296,40 @@ export default function TerrainProfile({ start, end, onClose }: TerrainProfilePr
const handleMouseLeave = useCallback(() => setHover(null), []); const handleMouseLeave = useCallback(() => setHover(null), []);
// Stats // Stats
const minElev = profile ? Math.min(...profile.map((p) => p.elevation)) : 0; const minElev = profile ? Math.min(...profile.map((p) => p.terrain_elevation)) : 0;
const maxElev = profile ? Math.max(...profile.map((p) => p.elevation)) : 0; const maxElev = profile ? Math.max(...profile.map((p) => p.terrain_elevation)) : 0;
const totalDist = profile && profile.length > 0 ? profile[profile.length - 1].distance : 0; const totalDist = fresnelData?.total_distance_m ?? 0;
// Status badge
const getStatusBadge = () => {
if (!fresnelData) return null;
if (fresnelData.los_clear && fresnelData.fresnel_clear) {
return <span className="text-green-600 dark:text-green-400 font-medium">LOS Clear</span>;
} else if (fresnelData.los_clear) {
return (
<span className="text-yellow-600 dark:text-yellow-400 font-medium">
F1 {fresnelData.fresnel_clear_pct}% Clear
</span>
);
} else {
return <span className="text-red-500 font-medium">LOS Blocked</span>;
}
};
// Ref for the container to block Leaflet events
const containerRef = useRef<HTMLDivElement>(null);
// Use Leaflet's DOM event utility to block click propagation to the map
useEffect(() => {
if (containerRef.current) {
L.DomEvent.disableClickPropagation(containerRef.current);
L.DomEvent.disableScrollPropagation(containerRef.current);
}
}, []);
return ( return (
<div <div
ref={containerRef}
className="absolute bottom-6 left-1/2 -translate-x-1/2 z-[1500] className="absolute bottom-6 left-1/2 -translate-x-1/2 z-[1500]
bg-white dark:bg-dark-surface rounded-lg shadow-xl border border-gray-200 dark:border-dark-border bg-white dark:bg-dark-surface rounded-lg shadow-xl border border-gray-200 dark:border-dark-border
overflow-hidden" overflow-hidden"
@@ -223,9 +337,20 @@ export default function TerrainProfile({ start, end, onClose }: TerrainProfilePr
> >
{/* Header */} {/* Header */}
<div className="flex items-center justify-between px-3 py-2 border-b border-gray-100 dark:border-dark-border"> <div className="flex items-center justify-between px-3 py-2 border-b border-gray-100 dark:border-dark-border">
<span className="text-xs font-semibold text-gray-700 dark:text-dark-text"> <div className="flex items-center gap-3">
Terrain Profile <span className="text-xs font-semibold text-gray-700 dark:text-dark-text">
</span> Terrain Profile
</span>
<label className="flex items-center gap-1.5 text-[10px] text-gray-500 cursor-pointer">
<input
type="checkbox"
checked={showFresnel}
onChange={(e) => setShowFresnel(e.target.checked)}
className="w-3 h-3"
/>
Fresnel Zone ({frequency} MHz)
</label>
</div>
<button <button
onClick={onClose} onClick={onClose}
className="text-gray-400 hover:text-gray-600 dark:hover:text-white text-sm w-6 h-6 flex items-center justify-center rounded hover:bg-gray-100 dark:hover:bg-dark-border" className="text-gray-400 hover:text-gray-600 dark:hover:text-white text-sm w-6 h-6 flex items-center justify-center rounded hover:bg-gray-100 dark:hover:bg-dark-border"
@@ -237,12 +362,12 @@ export default function TerrainProfile({ start, end, onClose }: TerrainProfilePr
{/* Canvas */} {/* Canvas */}
<div className="px-2 py-1"> <div className="px-2 py-1">
{loading && ( {loading && (
<div className="flex items-center justify-center h-[200px] text-sm text-gray-400"> <div className="flex items-center justify-center h-[220px] text-sm text-gray-400">
Loading profile... Loading profile...
</div> </div>
)} )}
{error && ( {error && (
<div className="flex items-center justify-center h-[200px] text-sm text-red-400"> <div className="flex items-center justify-center h-[220px] text-sm text-red-400">
{error} {error}
</div> </div>
)} )}
@@ -262,9 +387,17 @@ export default function TerrainProfile({ start, end, onClose }: TerrainProfilePr
<span>Distance: {(totalDist / 1000).toFixed(2)} km</span> <span>Distance: {(totalDist / 1000).toFixed(2)} km</span>
<span>Min: {Math.round(minElev)} m</span> <span>Min: {Math.round(minElev)} m</span>
<span>Max: {Math.round(maxElev)} m</span> <span>Max: {Math.round(maxElev)} m</span>
<span> {showFresnel && fresnelData && (
LOS: {profile[0].elevation <= profile[profile.length - 1].elevation ? 'Uphill' : 'Downhill'} <span>Clearance: {fresnelData.worst_clearance_m.toFixed(0)} m</span>
</span> )}
{getStatusBadge()}
</div>
)}
{/* Recommendation */}
{showFresnel && fresnelData && !fresnelData.fresnel_clear && (
<div className="px-3 py-1.5 text-[10px] bg-yellow-50 dark:bg-yellow-900/20 text-yellow-700 dark:text-yellow-300 border-t border-yellow-200 dark:border-yellow-800">
{fresnelData.recommendation} (~{fresnelData.estimated_loss_db.toFixed(1)} dB loss)
</div> </div>
)} )}
</div> </div>

View File

@@ -0,0 +1,669 @@
/**
* WebGL coverage layer using texture-based value interpolation.
*
* Simple approach (like CloudRF surface raster):
* 1. Create texture where each pixel = one grid cell's RSRP value
* 2. GPU's GL_LINEAR filtering interpolates between adjacent cells
* 3. Fragment shader maps interpolated value to color gradient
*/
import { useEffect, useRef, useMemo, useCallback } from 'react';
import { useMap } from 'react-leaflet';
export interface CoveragePoint {
  /** Latitude in decimal degrees (presumably WGS84 — matches Leaflet usage below). */
  lat: number;
  /** Longitude in decimal degrees. */
  lon: number;
  /** Signal strength sample; the -130..-50 defaults suggest RSRP in dBm — confirm against producer. */
  rsrp: number;
}

interface WebGLCoverageLayerProps {
  /** Coverage samples; expected to form a (possibly sparse) regular lat/lon grid. */
  points: CoveragePoint[];
  /** Canvas CSS opacity, 0..1. */
  opacity: number;
  /** RSRP mapped to the weak end of the gradient (red). */
  minRsrp?: number;
  /** RSRP mapped to the strong end of the gradient (cyan). */
  maxRsrp?: number;
  /** Show/hide the layer without tearing down GL resources. */
  visible: boolean;
  /** Invoked when WebGL context or program creation fails, so callers can fall back. */
  onWebGLFailed?: () => void;
}
// Full-screen quad vertex shader: passes clip-space positions through and
// derives texture UVs (Y flipped so texel row 0 maps to the north edge).
const VERTEX_SHADER = `
attribute vec2 a_position;
varying vec2 v_uv;
void main() {
  gl_Position = vec4(a_position, 0.0, 1.0);
  // Map position to UV, flip Y
  v_uv = vec2((a_position.x + 1.0) * 0.5, 1.0 - (a_position.y + 1.0) * 0.5);
}
`;

// Fragment shader with smoothstep interpolation for C2 continuity
// This removes visible grid edges with minimal performance cost
const FRAGMENT_SHADER = `
precision mediump float;
uniform sampler2D u_coverage;
uniform vec2 u_textureSize;
varying vec2 v_uv;

// Quintic Hermite smoothstep - gives C2 continuity (smooth 2nd derivatives)
// This removes visible "seams" between grid cells
vec4 textureSmooth(sampler2D tex, vec2 uv, vec2 texSize) {
  vec2 p = uv * texSize + 0.5;
  vec2 i = floor(p);
  vec2 f = p - i;
  // Quintic hermite curve: f³(6f² - 15f + 10)
  f = f * f * f * (f * (f * 6.0 - 15.0) + 10.0);
  return texture2D(tex, (i + f - 0.5) / texSize);
}

// RSRP to color gradient (red -> orange -> yellow -> green -> cyan)
// Applied AFTER interpolation for clean gradients
vec3 rsrpToColor(float t) {
  // t: 0 = weak (red), 1 = strong (cyan)
  if (t < 0.25) return mix(vec3(1.0, 0.0, 0.0), vec3(1.0, 0.5, 0.0), t / 0.25);
  if (t < 0.5) return mix(vec3(1.0, 0.5, 0.0), vec3(1.0, 1.0, 0.0), (t - 0.25) / 0.25);
  if (t < 0.75) return mix(vec3(1.0, 1.0, 0.0), vec3(0.0, 1.0, 0.0), (t - 0.5) / 0.25);
  return mix(vec3(0.0, 1.0, 0.0), vec3(0.0, 1.0, 1.0), (t - 0.75) / 0.25);
}

void main() {
  // 1. Sample with smoothstep interpolation (RAW RSRP value)
  vec4 texel = textureSmooth(u_coverage, v_uv, u_textureSize);

  // 2. Alpha channel indicates coverage presence
  if (texel.a < 0.1) discard;

  // 3. Apply colormap AFTER interpolation (critical for clean gradients)
  float rsrp = texel.r;
  vec3 color = rsrpToColor(rsrp);

  // 4. Smooth boundary fading
  float boundaryAlpha = smoothstep(0.01, 0.05, rsrp);

  gl_FragColor = vec4(color, boundaryAlpha * 0.85);
}
`;
// Compile a single GLSL shader of the given type; logs and returns null on failure.
function compileShader(gl: WebGLRenderingContext, source: string, type: number): WebGLShader | null {
  const result = gl.createShader(type);
  if (result === null) {
    // createShader only fails on a lost context.
    return null;
  }

  gl.shaderSource(result, source);
  gl.compileShader(result);

  const compiled = gl.getShaderParameter(result, gl.COMPILE_STATUS);
  if (compiled) {
    return result;
  }

  console.error('Shader error:', gl.getShaderInfoLog(result));
  gl.deleteShader(result);
  return null;
}
/**
 * Compile the coverage vertex/fragment shaders and link them into a program.
 * Returns null (after logging) on any compile or link failure.
 *
 * Fixes over the previous version: shader objects are now deleted once the
 * program is linked (the linked program keeps what it needs), the surviving
 * shader is deleted when the other fails to compile, and a program that fails
 * to link is deleted — previously all of these leaked GL objects on every
 * failed or repeated initialization.
 */
function createProgram(gl: WebGLRenderingContext): WebGLProgram | null {
  const vs = compileShader(gl, VERTEX_SHADER, gl.VERTEX_SHADER);
  const fs = compileShader(gl, FRAGMENT_SHADER, gl.FRAGMENT_SHADER);
  if (!vs || !fs) {
    // Release whichever shader did compile so a failed init doesn't leak.
    if (vs) gl.deleteShader(vs);
    if (fs) gl.deleteShader(fs);
    return null;
  }

  const program = gl.createProgram();
  if (!program) {
    gl.deleteShader(vs);
    gl.deleteShader(fs);
    return null;
  }

  gl.attachShader(program, vs);
  gl.attachShader(program, fs);
  gl.linkProgram(program);

  // Flag shaders for deletion; they stay alive while attached to the program.
  gl.deleteShader(vs);
  gl.deleteShader(fs);

  if (!gl.getProgramParameter(program, gl.LINK_STATUS)) {
    console.error('Program error:', gl.getProgramInfoLog(program));
    gl.deleteProgram(program); // don't leak the program object on link failure
    return null;
  }
  return program;
}
// Geometry of the regular lat/lon grid inferred from the coverage points.
interface GridInfo {
  /** Texture width in cells (longitude axis). */
  width: number;
  /** Texture height in cells (latitude axis). */
  height: number;
  minLat: number;
  maxLat: number;
  minLon: number;
  maxLon: number;
  /** Cell size in degrees latitude (median gap between adjacent rows). */
  latStep: number;
  /** Cell size in degrees longitude (median gap between adjacent columns). */
  lonStep: number;
}
/**
 * Infer a regular lat/lon grid from a flat list of coverage samples.
 *
 * The cell step is estimated as the median gap between adjacent unique
 * coordinates (robust against missing cells inside a circular footprint),
 * and grid dimensions are derived from the overall extent divided by step.
 *
 * Returns null when no 2-D grid can be formed: fewer than 4 points, or a
 * degenerate axis (all points on one row/column). Previously a degenerate
 * axis produced step 0 and NaN width/height downstream (0/0 division),
 * which then sized the texture as NaN.
 */
function detectGrid(points: CoveragePoint[]): GridInfo | null {
  if (points.length < 4) return null;

  // Bounds straight from the data (no rounding).
  let minLat = Infinity, maxLat = -Infinity;
  let minLon = Infinity, maxLon = -Infinity;
  for (const p of points) {
    if (p.lat < minLat) minLat = p.lat;
    if (p.lat > maxLat) maxLat = p.lat;
    if (p.lon < minLon) minLon = p.lon;
    if (p.lon > maxLon) maxLon = p.lon;
  }

  // Unique coordinates rounded to 6 decimal places (~0.1 m) to merge float noise.
  const lats = new Set<number>();
  const lons = new Set<number>();
  for (const p of points) {
    lats.add(Math.round(p.lat * 1000000) / 1000000);
    lons.add(Math.round(p.lon * 1000000) / 1000000);
  }

  const sortedLats = Array.from(lats).sort((a, b) => a - b);
  const sortedLons = Array.from(lons).sort((a, b) => a - b);

  // Median difference between adjacent unique coordinates = grid step.
  const latDiffs: number[] = [];
  const lonDiffs: number[] = [];
  for (let i = 1; i < sortedLats.length; i++) {
    latDiffs.push(sortedLats[i] - sortedLats[i - 1]);
  }
  for (let i = 1; i < sortedLons.length; i++) {
    lonDiffs.push(sortedLons[i] - sortedLons[i - 1]);
  }
  latDiffs.sort((a, b) => a - b);
  lonDiffs.sort((a, b) => a - b);

  const latStep = latDiffs[Math.floor(latDiffs.length / 2)] || (maxLat - minLat) / 10;
  const lonStep = lonDiffs[Math.floor(lonDiffs.length / 2)] || (maxLon - minLon) / 10;

  // Fix: a degenerate axis yields step 0 (or NaN); treat as "no grid" instead
  // of letting NaN dimensions reach the texture allocator.
  if (!(latStep > 0) || !(lonStep > 0)) return null;

  // Dimensions from actual extent and step; clamp to at least 2 cells per axis.
  const width = Math.max(2, Math.round((maxLon - minLon) / lonStep) + 1);
  const height = Math.max(2, Math.round((maxLat - minLat) / latStep) + 1);

  return {
    width,
    height,
    minLat,
    maxLat,
    minLon,
    maxLon,
    latStep,
    lonStep,
  };
}
// A created GL texture plus its pixel dimensions (needed for the shader's
// u_textureSize uniform).
interface TextureResult {
  texture: WebGLTexture;
  width: number;
  height: number;
}
/**
 * Build an RGBA texture from coverage samples.
 *
 * R channel = RSRP normalized to [minRsrp, maxRsrp] and scaled to 0-255;
 * A channel = coverage presence (255 for real data, faded for filled cells,
 * 0 for empty). Cells with no sample are filled from the nearest sampled
 * cell so the circular coverage footprint renders without holes.
 *
 * Returns null only if gl.createTexture fails (e.g. lost context).
 */
function createCoverageTexture(
  gl: WebGLRenderingContext,
  points: CoveragePoint[],
  grid: GridInfo,
  minRsrp: number,
  maxRsrp: number
): TextureResult | null {
  const { width, height, minLat, maxLat, minLon, maxLon } = grid;
  const latRange = maxLat - minLat;
  const lonRange = maxLon - minLon;
  const rsrpRange = maxRsrp - minRsrp;

  // Step 1: Create sparse grid with actual point positions
  // Store normalized RSRP value (0-1) at each grid cell that has data
  const sparseGrid = new Map<number, number>(); // key = gy * width + gx, value = normalized RSRP

  for (const p of points) {
    const gx = Math.round((p.lon - minLon) / lonRange * (width - 1));
    const gy = Math.round((p.lat - minLat) / latRange * (height - 1));
    if (gx >= 0 && gx < width && gy >= 0 && gy < height) {
      // Clamp to [0,1] so out-of-range RSRP doesn't wrap the byte value.
      const normalized = Math.max(0, Math.min(1, (p.rsrp - minRsrp) / rsrpRange));
      const key = gy * width + gx;
      // Keep the stronger signal if multiple points map to same cell
      if (!sparseGrid.has(key) || sparseGrid.get(key)! < normalized) {
        sparseGrid.set(key, normalized);
      }
    }
  }

  // Step 2: For each empty cell, find nearest filled cell using expanding search
  // This fills the circular coverage area properly
  const data = new Uint8Array(width * height * 4);
  const maxSearchRadius = Math.max(width, height); // Max distance to search
  let filledCount = 0;

  for (let gy = 0; gy < height; gy++) {
    for (let gx = 0; gx < width; gx++) {
      const key = gy * width + gx;

      if (sparseGrid.has(key)) {
        // Cell has actual data
        const value = Math.round(sparseGrid.get(key)! * 255);
        const idx = key * 4;
        data[idx] = value;
        data[idx + 1] = 0;
        data[idx + 2] = 0;
        data[idx + 3] = 255;
        filledCount++;
      } else {
        // Find nearest cell with data using expanding square search
        // NOTE(review): the ring is scanned in Chebyshev radius order, so the
        // Euclidean-nearest sample at a larger ring can be missed — an
        // approximation the visuals tolerate.
        let found = false;
        let nearestValue = 0;
        let nearestDistSq = Infinity;

        // Search in expanding radius
        for (let r = 1; r <= maxSearchRadius && !found; r++) {
          // Check cells at distance r (square perimeter)
          for (let dy = -r; dy <= r && !found; dy++) {
            for (let dx = -r; dx <= r; dx++) {
              // Only check perimeter cells (optimization)
              if (Math.abs(dx) !== r && Math.abs(dy) !== r) continue;

              const nx = gx + dx;
              const ny = gy + dy;
              if (nx < 0 || nx >= width || ny < 0 || ny >= height) continue;

              const nkey = ny * width + nx;
              if (sparseGrid.has(nkey)) {
                const distSq = dx * dx + dy * dy;
                if (distSq < nearestDistSq) {
                  nearestDistSq = distSq;
                  nearestValue = sparseGrid.get(nkey)!;
                }
              }
            }
          }
          // If we found something at this radius, use it (nearest neighbor)
          // (`found` is only set here, so the `!found` guard on the dy loop
          // above never fires mid-ring — the full perimeter is always scanned.)
          if (nearestDistSq < Infinity) {
            found = true;
          }
        }

        if (found) {
          // Fill with nearest neighbor value
          // Apply distance-based alpha fade for smooth edges
          const dist = Math.sqrt(nearestDistSq);
          const maxDist = 3; // Fade out over 3 cells
          const alpha = dist <= maxDist ? 255 : Math.max(0, 255 - (dist - maxDist) * 50);
          const value = Math.round(nearestValue * 255);
          const idx = key * 4;
          data[idx] = value;
          data[idx + 1] = 0;
          data[idx + 2] = 0;
          data[idx + 3] = Math.round(alpha);
          filledCount++;
        }
        // If not found, leave as transparent (alpha = 0)
      }
    }
  }

  console.log('[WebGL] Texture created (nearest-neighbor filled):', {
    textureSize: `${width}x${height}`,
    originalPoints: sparseGrid.size,
    filledCells: filledCount,
    totalCells: width * height,
    fillPercent: (filledCount / (width * height) * 100).toFixed(1) + '%'
  });

  const texture = gl.createTexture();
  if (!texture) return null;

  gl.bindTexture(gl.TEXTURE_2D, texture);
  gl.texImage2D(gl.TEXTURE_2D, 0, gl.RGBA, width, height, 0, gl.RGBA, gl.UNSIGNED_BYTE, data);

  // LINEAR filtering for smooth interpolation between filled cells
  gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.LINEAR);
  gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MAG_FILTER, gl.LINEAR);
  gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_S, gl.CLAMP_TO_EDGE);
  gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_T, gl.CLAMP_TO_EDGE);

  return { texture, width, height };
}
/**
 * Leaflet overlay that renders the coverage texture on a single absolutely
 * positioned canvas inside the map's overlayPane.
 *
 * Lifecycle is split across five effects so the expensive resources
 * (context, program, texture) are created only when their inputs change,
 * while repositioning on map move/zoom is a cheap re-draw.
 * Renders no React DOM itself (returns null); the canvas is managed manually.
 */
export default function WebGLCoverageLayer({
  points,
  opacity,
  minRsrp = -130,
  maxRsrp = -50,
  visible,
  onWebGLFailed,
}: WebGLCoverageLayerProps) {
  const map = useMap();

  // Refs for WebGL resources
  const canvasRef = useRef<HTMLCanvasElement | null>(null);
  const glRef = useRef<WebGLRenderingContext | null>(null);
  const programRef = useRef<WebGLProgram | null>(null);
  const textureRef = useRef<WebGLTexture | null>(null);
  const quadBufferRef = useRef<WebGLBuffer | null>(null);

  // Track what data the current texture was built from
  const lastPointsHashRef = useRef<string>('');
  const boundsRef = useRef<{ minLat: number; maxLat: number; minLon: number; maxLon: number } | null>(null);
  const textureSizeRef = useRef<{ width: number; height: number }>({ width: 1, height: 1 });

  // Stable ref for callback to avoid re-initialization
  const onWebGLFailedRef = useRef(onWebGLFailed);
  onWebGLFailedRef.current = onWebGLFailed;

  // Track if initialized to prevent re-runs
  const initializedRef = useRef(false);

  // Compute stable hash for points data.
  // NOTE(review): only length, first point and last point are hashed — two
  // different datasets sharing those would be treated as identical.
  const pointsHash = useMemo(() => {
    if (points.length === 0) return '';
    const first = points[0];
    const last = points[points.length - 1];
    return `${points.length}:${first.lat.toFixed(5)}:${last.lon.toFixed(5)}:${first.rsrp.toFixed(1)}`;
  }, [points]);

  // Render function - only draws, no resource creation
  const render = useCallback(() => {
    const canvas = canvasRef.current;
    const gl = glRef.current;
    const program = programRef.current;
    const texture = textureRef.current;
    const bounds = boundsRef.current;

    // DEBUG: Check what's missing if we can't render
    if (!canvas || !gl || !program || !texture || !bounds) {
      console.log('[WebGL] Render skipped - missing:', {
        canvas: !!canvas,
        gl: !!gl,
        program: !!program,
        texture: !!texture,
        bounds: !!bounds
      });
      return;
    }

    // Position canvas over coverage area (layer-point space moves with the map pane)
    const nw = map.latLngToLayerPoint([bounds.maxLat, bounds.minLon]);
    const se = map.latLngToLayerPoint([bounds.minLat, bounds.maxLon]);
    const width = Math.abs(se.x - nw.x);
    const height = Math.abs(se.y - nw.y);

    if (width < 1 || height < 1) return;

    canvas.style.transform = `translate(${nw.x}px, ${nw.y}px)`;
    canvas.style.width = `${width}px`;
    canvas.style.height = `${height}px`;

    // DEBUG: Log every reposition
    // NOTE(review): this fires every animation frame during pans — consider
    // gating before shipping.
    console.log('[WebGL] Canvas repositioned:', {
      transform: canvas.style.transform,
      width: canvas.style.width,
      height: canvas.style.height,
      zoom: map.getZoom()
    });

    // Get texture size for shader uniform
    const texSize = textureSizeRef.current;

    // Set canvas resolution (DPR capped at 2, backing store capped at 2048px)
    const dpr = Math.min(window.devicePixelRatio || 1, 2);
    const canvasW = Math.min(Math.round(width * dpr), 2048);
    const canvasH = Math.min(Math.round(height * dpr), 2048);
    if (canvas.width !== canvasW || canvas.height !== canvasH) {
      canvas.width = canvasW;
      canvas.height = canvasH;
    }

    // Render
    gl.viewport(0, 0, canvasW, canvasH);
    gl.clearColor(0, 0, 0, 0);
    gl.clear(gl.COLOR_BUFFER_BIT);
    gl.useProgram(program);

    // Bind quad buffer
    gl.bindBuffer(gl.ARRAY_BUFFER, quadBufferRef.current);
    const posLoc = gl.getAttribLocation(program, 'a_position');
    gl.enableVertexAttribArray(posLoc);
    gl.vertexAttribPointer(posLoc, 2, gl.FLOAT, false, 0, 0);

    // Bind texture
    gl.activeTexture(gl.TEXTURE0);
    gl.bindTexture(gl.TEXTURE_2D, texture);
    gl.uniform1i(gl.getUniformLocation(program, 'u_coverage'), 0);

    // Set texture size uniform (texSize already defined above for blur)
    const textureSizeLocation = gl.getUniformLocation(program, 'u_textureSize');
    if (textureSizeLocation) {
      gl.uniform2f(textureSizeLocation, texSize.width, texSize.height);
    }

    // Draw
    gl.drawArrays(gl.TRIANGLE_STRIP, 0, 4);
    gl.disableVertexAttribArray(posLoc);
  }, [map]);

  // Effect 1: Initialize WebGL (canvas, context, program, quad buffer) - runs ONCE
  useEffect(() => {
    if (!visible) return;

    // Skip if already initialized
    if (initializedRef.current && canvasRef.current && glRef.current) {
      return;
    }

    const pane = map.getPane('overlayPane');
    if (!pane) return;

    // Create canvas if needed
    if (!canvasRef.current) {
      // Remove any leftover canvas elements from previous sessions
      const existingCanvases = pane.querySelectorAll('canvas.webgl-coverage');
      existingCanvases.forEach(c => c.remove());
      console.log('[WebGL] Removed', existingCanvases.length, 'leftover canvas elements');

      const canvas = document.createElement('canvas');
      canvas.className = 'webgl-coverage'; // Add class for identification
      canvas.style.position = 'absolute';
      canvas.style.pointerEvents = 'none';
      canvas.style.transformOrigin = '0 0';
      pane.appendChild(canvas);
      canvasRef.current = canvas;
    }

    const canvas = canvasRef.current;

    // Initialize WebGL if needed
    if (!glRef.current) {
      const gl = canvas.getContext('webgl', { alpha: true, premultipliedAlpha: false });
      if (!gl) {
        console.error('[WebGL] WebGL not available');
        onWebGLFailedRef.current?.();
        return;
      }
      glRef.current = gl;
      gl.enable(gl.BLEND);
      gl.blendFunc(gl.SRC_ALPHA, gl.ONE_MINUS_SRC_ALPHA);
    }

    const gl = glRef.current;

    // Create program if needed
    if (!programRef.current) {
      const program = createProgram(gl);
      if (!program) {
        console.error('[WebGL] Failed to create program');
        onWebGLFailedRef.current?.();
        return;
      }
      programRef.current = program;
    }

    // Create quad buffer if needed (triangle-strip full-screen quad)
    if (!quadBufferRef.current) {
      const buf = gl.createBuffer();
      gl.bindBuffer(gl.ARRAY_BUFFER, buf);
      gl.bufferData(gl.ARRAY_BUFFER, new Float32Array([
        -1, -1, 1, -1, -1, 1, 1, 1
      ]), gl.STATIC_DRAW);
      quadBufferRef.current = buf;
    }

    initializedRef.current = true;
    console.log('[WebGL] Initialized (should appear ONCE)');
  }, [visible, map]); // Removed onWebGLFailed - use ref instead

  // Effect 2: Create texture when points data changes
  useEffect(() => {
    if (!visible || points.length === 0 || !glRef.current) return;

    // Skip if same data
    if (pointsHash === lastPointsHashRef.current && textureRef.current) {
      return;
    }

    const gl = glRef.current;
    const grid = detectGrid(points);
    if (!grid) return;

    // Delete old texture
    if (textureRef.current) {
      gl.deleteTexture(textureRef.current);
      textureRef.current = null;
    }

    // Create new texture (returns texture + dimensions)
    const result = createCoverageTexture(gl, points, grid, minRsrp, maxRsrp);
    if (!result) {
      console.error('[WebGL] Failed to create texture');
      return;
    }

    textureRef.current = result.texture;
    lastPointsHashRef.current = pointsHash;

    // Store texture size for shader uniform
    textureSizeRef.current = { width: result.width, height: result.height };

    // Store bounds for rendering (with half-cell padding so edge cells render fully)
    const canvasBounds = {
      minLat: grid.minLat - grid.latStep / 2,
      maxLat: grid.maxLat + grid.latStep / 2,
      minLon: grid.minLon - grid.lonStep / 2,
      maxLon: grid.maxLon + grid.lonStep / 2,
    };
    boundsRef.current = canvasBounds;

    // FULL DEBUG: Compare data extent vs canvas bounds
    const lats = points.map(p => p.lat);
    const lons = points.map(p => p.lon);
    const dataMinLat = Math.min(...lats);
    const dataMaxLat = Math.max(...lats);
    const dataMinLon = Math.min(...lons);
    const dataMaxLon = Math.max(...lons);

    console.log('[WebGL] FULL DEBUG:', {
      // Data extent (actual points)
      dataMinLat: dataMinLat.toFixed(6),
      dataMaxLat: dataMaxLat.toFixed(6),
      dataMinLon: dataMinLon.toFixed(6),
      dataMaxLon: dataMaxLon.toFixed(6),
      dataLatRange: (dataMaxLat - dataMinLat).toFixed(6),
      dataLonRange: (dataMaxLon - dataMinLon).toFixed(6),
      // Grid detection result
      gridWidth: grid.width,
      gridHeight: grid.height,
      gridMinLat: grid.minLat.toFixed(6),
      gridMaxLat: grid.maxLat.toFixed(6),
      gridMinLon: grid.minLon.toFixed(6),
      gridMaxLon: grid.maxLon.toFixed(6),
      gridLatStep: grid.latStep.toFixed(6),
      gridLonStep: grid.lonStep.toFixed(6),
      // Texture size
      textureWidth: result.width,
      textureHeight: result.height,
      // Canvas bounds (what we use for rendering)
      canvasMinLat: canvasBounds.minLat.toFixed(6),
      canvasMaxLat: canvasBounds.maxLat.toFixed(6),
      canvasMinLon: canvasBounds.minLon.toFixed(6),
      canvasMaxLon: canvasBounds.maxLon.toFixed(6),
      canvasLatRange: (canvasBounds.maxLat - canvasBounds.minLat).toFixed(6),
      canvasLonRange: (canvasBounds.maxLon - canvasBounds.minLon).toFixed(6),
      // Comparison
      latCoveragePercent: ((canvasBounds.maxLat - canvasBounds.minLat) / (dataMaxLat - dataMinLat) * 100).toFixed(1) + '%',
      lonCoveragePercent: ((canvasBounds.maxLon - canvasBounds.minLon) / (dataMaxLon - dataMinLon) * 100).toFixed(1) + '%',
      // Expected
      expectedRange: '~0.18 degrees for 20km radius',
      pointCount: points.length
    });

    // Initial render
    render();
  }, [visible, points, pointsHash, minRsrp, maxRsrp, render]);

  // Effect 3: Set up map event listeners for re-rendering on move/zoom
  // Note: Set up listeners even without texture - render() will check for texture
  useEffect(() => {
    if (!visible) return;

    let frameId = 0;
    let moveCount = 0;
    const onMapChange = () => {
      moveCount++;
      if (moveCount <= 3 || moveCount % 10 === 0) {
        console.log('[WebGL] Map event #' + moveCount + ', triggering render');
      }
      // Coalesce bursts of move events into one draw per frame.
      cancelAnimationFrame(frameId);
      frameId = requestAnimationFrame(render);
    };

    map.on('move', onMapChange);
    map.on('zoom', onMapChange);
    map.on('resize', onMapChange);
    console.log('[WebGL] Map listeners attached');

    return () => {
      map.off('move', onMapChange);
      map.off('zoom', onMapChange);
      map.off('resize', onMapChange);
      cancelAnimationFrame(frameId);
      console.log('[WebGL] Map listeners detached');
    };
  }, [visible, map, render]);

  // Effect 4: Update opacity without recreating anything
  useEffect(() => {
    if (canvasRef.current) {
      canvasRef.current.style.opacity = String(opacity);
    }
  }, [opacity]);

  // Effect 5: Hide/show canvas based on visibility
  useEffect(() => {
    if (canvasRef.current) {
      canvasRef.current.style.display = visible ? 'block' : 'none';
    }
  }, [visible]);

  // Cleanup on unmount: free GL objects and detach the manually-managed canvas.
  useEffect(() => {
    return () => {
      const gl = glRef.current;
      if (gl) {
        if (textureRef.current) gl.deleteTexture(textureRef.current);
        if (quadBufferRef.current) gl.deleteBuffer(quadBufferRef.current);
        if (programRef.current) gl.deleteProgram(programRef.current);
      }
      if (canvasRef.current) {
        canvasRef.current.remove();
        canvasRef.current = null;
      }
      glRef.current = null;
      programRef.current = null;
      textureRef.current = null;
      quadBufferRef.current = null;
    };
  }, []);

  return null;
}

View File

@@ -0,0 +1,632 @@
/**
* WebGL Radial Gradients Coverage Layer
*
* Uses multi-pass additive blending to render smooth radial gradients
* around each coverage point, similar to Canvas GeographicHeatmap but GPU-accelerated.
*
* Approach:
* 1. Render each point as a quad with radial falloff (only when data changes)
* 2. Use additive blending to accumulate (weight * rsrp, weight)
* 3. Final pass: normalize and apply colormap (on every frame)
*/
import { useEffect, useRef, useMemo, useCallback } from 'react';
import { useMap } from 'react-leaflet';
// Logging verbosity: 0=off, 1=errors, 2=info, 3=debug
const LOG_LEVEL = 2;

// Emit a tagged console line when `level` is within the configured verbosity.
const log = (level: number, ...args: unknown[]) => {
  if (level > LOG_LEVEL) {
    return;
  }
  console.log('[WebGL Radial]', ...args);
};
export interface CoveragePoint {
  /** Latitude in decimal degrees. */
  lat: number;
  /** Longitude in decimal degrees. */
  lon: number;
  /** Signal strength sample (treated as RSRP, normalized via min/max props). */
  rsrp: number;
}

interface WebGLRadialCoverageLayerProps {
  /** Coverage samples to splat as radial gradients. */
  points: CoveragePoint[];
  /** Layer opacity, 0..1 (applied in the composite pass). */
  opacity: number;
  minRsrp?: number;
  maxRsrp?: number;
  /** Show/hide without tearing down GL resources. */
  visible: boolean;
  /** Gradient radius around each point, in meters. */
  radiusMeters?: number;
  /** Invoked when WebGL setup fails, so callers can fall back. */
  onWebGLFailed?: () => void;
}
// Point accumulation vertex shader: expands an instanced unit quad around
// each point center in normalized (0..1) map coordinates.
const POINT_VERTEX_SHADER = `
attribute vec2 a_position;   // quad vertices (-1 to 1)
attribute vec2 a_pointPos;   // point position in normalized coords
attribute float a_pointRsrp; // normalized RSRP (0-1)
attribute float a_pointRadius; // radius in normalized coords
varying vec2 v_localPos;
varying float v_rsrp;
void main() {
  // Expand quad around point center
  vec2 pos = a_pointPos + a_position * a_pointRadius;
  gl_Position = vec4(pos * 2.0 - 1.0, 0.0, 1.0); // Map 0-1 to clip space -1 to 1
  v_localPos = a_position; // -1 to 1 within the quad
  v_rsrp = a_pointRsrp;
}
`;

// Point accumulation fragment shader: writes (weight*rsrp, weight) so the
// composite pass can compute a weighted-average RSRP per pixel.
const POINT_FRAGMENT_SHADER = `
precision highp float;
varying vec2 v_localPos;
varying float v_rsrp;
void main() {
  // Radial distance from center (0 at center, 1 at edge)
  float dist = length(v_localPos);

  // Discard outside circle
  if (dist > 1.0) discard;

  // Radial falloff - softer gaussian for better edge coverage
  // exp(-2) = 0.135 at edge vs exp(-3) = 0.05, giving more contribution from edge points
  float weight = exp(-dist * dist * 2.0);

  // Output: (weight * rsrp, weight, 0, 0)
  // Using RG channels for accumulation
  gl_FragColor = vec4(weight * v_rsrp, weight, 0.0, 1.0);
}
`;

// Final compositing vertex shader: full-screen quad with pass-through UVs.
const COMPOSITE_VERTEX_SHADER = `
attribute vec2 a_position;
varying vec2 v_uv;
void main() {
  gl_Position = vec4(a_position, 0.0, 1.0);
  v_uv = (a_position + 1.0) * 0.5;
}
`;

// Final compositing fragment shader: normalizes the accumulated
// (value, weight) pair into an average RSRP, then applies the colormap.
const COMPOSITE_FRAGMENT_SHADER = `
precision highp float;
uniform sampler2D u_accumTexture;
uniform float u_opacity;
varying vec2 v_uv;

vec3 rsrpToColor(float t) {
  // t: 0 = weak (red), 1 = strong (cyan)
  if (t < 0.25) return mix(vec3(1.0, 0.0, 0.0), vec3(1.0, 0.5, 0.0), t / 0.25);
  if (t < 0.5) return mix(vec3(1.0, 0.5, 0.0), vec3(1.0, 1.0, 0.0), (t - 0.25) / 0.25);
  if (t < 0.75) return mix(vec3(1.0, 1.0, 0.0), vec3(0.0, 1.0, 0.0), (t - 0.5) / 0.25);
  return mix(vec3(0.0, 1.0, 0.0), vec3(0.0, 1.0, 1.0), (t - 0.75) / 0.25);
}

void main() {
  vec4 accum = texture2D(u_accumTexture, v_uv);
  float totalValue = accum.r;
  float totalWeight = accum.g;

  // No coverage - discard if weight is truly zero
  if (totalWeight < 0.0001) discard;

  // Weighted average RSRP
  float avgRsrp = clamp(totalValue / totalWeight, 0.0, 1.0);

  // Color mapping
  vec3 color = rsrpToColor(avgRsrp);

  // Alpha based on weight (fade at edges)
  float alpha = min(1.0, totalWeight * 0.1) * u_opacity;

  gl_FragColor = vec4(color, alpha);
}
`;
/**
 * Compile a single GLSL shader of the given type.
 * Logs the info log and returns null on failure; the failed shader object
 * is deleted before returning.
 */
function compileShader(gl: WebGLRenderingContext, source: string, type: number): WebGLShader | null {
  const shader = gl.createShader(type);
  if (shader === null) return null;
  gl.shaderSource(shader, source);
  gl.compileShader(shader);
  const compiled = gl.getShaderParameter(shader, gl.COMPILE_STATUS);
  if (compiled) return shader;
  console.error('[WebGL Radial] Shader error:', gl.getShaderInfoLog(shader));
  gl.deleteShader(shader);
  return null;
}
/**
 * Compile and link a vertex + fragment shader pair into a WebGLProgram.
 *
 * Returns null (after logging) on any compile or link failure. Unlike the
 * previous version, every failure path now releases its GL objects:
 * - if only one shader compiles, the surviving shader is deleted;
 * - if program creation fails, both shaders are deleted;
 * - shaders are flagged for deletion right after linking (GL keeps them
 *   alive while the program references them), so they are freed even when
 *   linking fails;
 * - a program that fails to link is deleted instead of being leaked.
 */
function createProgram(gl: WebGLRenderingContext, vsSource: string, fsSource: string): WebGLProgram | null {
  const vs = compileShader(gl, vsSource, gl.VERTEX_SHADER);
  const fs = compileShader(gl, fsSource, gl.FRAGMENT_SHADER);
  if (!vs || !fs) {
    // One shader may have compiled successfully before the other failed.
    if (vs) gl.deleteShader(vs);
    if (fs) gl.deleteShader(fs);
    return null;
  }
  const program = gl.createProgram();
  if (!program) {
    gl.deleteShader(vs);
    gl.deleteShader(fs);
    return null;
  }
  gl.attachShader(program, vs);
  gl.attachShader(program, fs);
  gl.linkProgram(program);
  // Flag shaders for deletion regardless of link outcome; GL only frees
  // them once the owning program is deleted.
  gl.deleteShader(vs);
  gl.deleteShader(fs);
  if (!gl.getProgramParameter(program, gl.LINK_STATUS)) {
    console.error('[WebGL Radial] Program error:', gl.getProgramInfoLog(program));
    gl.deleteProgram(program); // previously leaked on link failure
    return null;
  }
  return program;
}
/** Geographic bounding box (degrees) enclosing all points plus gradient padding. */
interface Bounds {
  minLat: number;
  maxLat: number;
  minLon: number;
  maxLon: number;
}
/**
 * Leaflet overlay component rendering coverage points as GPU radial gradients.
 *
 * Two-pass pipeline: pass 1 accumulates (weight*rsrp, weight) per pixel into a
 * float texture via additive blending (only when points/size change); pass 2
 * composites that texture to the screen with the colormap on every map move.
 * Returns null — all drawing happens on a canvas appended to the overlay pane.
 */
export default function WebGLRadialCoverageLayer({
  points,
  opacity,
  minRsrp = -130,
  maxRsrp = -50,
  visible,
  radiusMeters = 400,
  onWebGLFailed,
}: WebGLRadialCoverageLayerProps) {
  const map = useMap();
  // Refs for WebGL resources
  const canvasRef = useRef<HTMLCanvasElement | null>(null);
  const glRef = useRef<WebGLRenderingContext | null>(null);
  const pointProgramRef = useRef<WebGLProgram | null>(null);
  const compositeProgramRef = useRef<WebGLProgram | null>(null);
  const accumTextureRef = useRef<WebGLTexture | null>(null);
  const framebufferRef = useRef<WebGLFramebuffer | null>(null);
  const quadBufferRef = useRef<WebGLBuffer | null>(null);
  const pointBufferRef = useRef<WebGLBuffer | null>(null);
  const boundsRef = useRef<Bounds | null>(null);
  const initializedRef = useRef(false);
  const lastPointsHashRef = useRef<string>('');
  const instExtRef = useRef<ANGLE_instanced_arrays | null>(null);
  // Track if points need to be re-rendered (expensive pass)
  const needsPointRenderRef = useRef(true);
  // Stable ref for callback
  const onWebGLFailedRef = useRef(onWebGLFailed);
  onWebGLFailedRef.current = onWebGLFailed;
  // Track framebuffer size
  const fbSizeRef = useRef<{ width: number; height: number }>({ width: 0, height: 0 });
  // Compute points hash for change detection.
  // NOTE(review): this is a cheap heuristic (length + first/last samples);
  // a change that preserves length and both endpoint samples would be missed.
  const pointsHash = useMemo(() => {
    if (points.length === 0) return 'empty';
    const first = points[0];
    const last = points[points.length - 1];
    return `${points.length}:${first.lat.toFixed(5)}:${last.lon.toFixed(5)}:${first.rsrp.toFixed(1)}`;
  }, [points]);
  // Calculate bounds from points (plus padding so edge gradients aren't clipped).
  const calculateBounds = useCallback((pts: CoveragePoint[]): Bounds | null => {
    if (pts.length === 0) return null;
    let minLat = Infinity, maxLat = -Infinity;
    let minLon = Infinity, maxLon = -Infinity;
    for (const p of pts) {
      if (p.lat < minLat) minLat = p.lat;
      if (p.lat > maxLat) maxLat = p.lat;
      if (p.lon < minLon) minLon = p.lon;
      if (p.lon > maxLon) maxLon = p.lon;
    }
    // Padding needs to accommodate the radial gradient of edge points
    // Each point's gradient extends beyond its center, use 12% of range as padding
    const latRangeRaw = maxLat - minLat;
    const lonRangeRaw = maxLon - minLon;
    const latPaddingGradient = latRangeRaw * 0.12;
    const lonPaddingGradient = lonRangeRaw * 0.12;
    // radiusMeters converted to degrees (~111 km per degree of latitude;
    // longitude scaled by cos of the mid latitude)
    const latPaddingRadius = radiusMeters / 111000;
    const lonPaddingRadius = radiusMeters / (111000 * Math.cos((minLat + maxLat) / 2 * Math.PI / 180));
    const latPadding = Math.max(latPaddingGradient, latPaddingRadius);
    const lonPadding = Math.max(lonPaddingGradient, lonPaddingRadius);
    log(2, 'Bounds padding:', { latPadding: latPadding.toFixed(5), lonPadding: lonPadding.toFixed(5) });
    return {
      minLat: minLat - latPadding,
      maxLat: maxLat + latPadding,
      minLon: minLon - lonPadding,
      maxLon: maxLon + lonPadding,
    };
  }, [radiusMeters]);
  // Render function - split into point accumulation (expensive) and composite (cheap)
  const render = useCallback(() => {
    const canvas = canvasRef.current;
    const gl = glRef.current;
    const pointProgram = pointProgramRef.current;
    const compositeProgram = compositeProgramRef.current;
    const framebuffer = framebufferRef.current;
    const accumTexture = accumTextureRef.current;
    const quadBuffer = quadBufferRef.current;
    const bounds = boundsRef.current;
    if (!canvas || !gl || !pointProgram || !compositeProgram || !framebuffer ||
        !accumTexture || !quadBuffer || !bounds) {
      return;
    }
    log(3, 'render() points:', points.length, 'needsPointRender:', needsPointRenderRef.current);
    // Position canvas over coverage area
    const nw = map.latLngToLayerPoint([bounds.maxLat, bounds.minLon]);
    const se = map.latLngToLayerPoint([bounds.minLat, bounds.maxLon]);
    const width = Math.abs(se.x - nw.x);
    const height = Math.abs(se.y - nw.y);
    if (width < 1 || height < 1) return;
    canvas.style.transform = `translate(${nw.x}px, ${nw.y}px)`;
    canvas.style.width = `${width}px`;
    canvas.style.height = `${height}px`;
    // Set canvas resolution (DPR capped at 2, texture dimensions capped at 2048)
    const dpr = Math.min(window.devicePixelRatio || 1, 2);
    const canvasW = Math.min(Math.round(width * dpr), 2048);
    const canvasH = Math.min(Math.round(height * dpr), 2048);
    // Resize canvas and framebuffer if needed (with tolerance to avoid subpixel jitter)
    const needsResize = Math.abs(canvas.width - canvasW) > 2 || Math.abs(canvas.height - canvasH) > 2;
    if (needsResize) {
      canvas.width = canvasW;
      canvas.height = canvasH;
      // Resize accumulation texture (framebuffer attachment survives the resize)
      gl.bindTexture(gl.TEXTURE_2D, accumTexture);
      gl.texImage2D(gl.TEXTURE_2D, 0, gl.RGBA, canvasW, canvasH, 0, gl.RGBA, gl.FLOAT, null);
      fbSizeRef.current = { width: canvasW, height: canvasH };
      needsPointRenderRef.current = true; // Must re-render points after resize
    }
    // === Pass 1: Accumulate points into framebuffer (only when needed) ===
    if (needsPointRenderRef.current) {
      const t0 = performance.now();
      gl.bindFramebuffer(gl.FRAMEBUFFER, framebuffer);
      gl.viewport(0, 0, canvas.width, canvas.height);
      gl.clearColor(0, 0, 0, 0);
      gl.clear(gl.COLOR_BUFFER_BIT);
      gl.useProgram(pointProgram);
      gl.enable(gl.BLEND);
      gl.blendFunc(gl.ONE, gl.ONE); // Additive blending
      // Get attribute locations
      const posLoc = gl.getAttribLocation(pointProgram, 'a_position');
      const pointPosLoc = gl.getAttribLocation(pointProgram, 'a_pointPos');
      const pointRsrpLoc = gl.getAttribLocation(pointProgram, 'a_pointRsrp');
      const pointRadiusLoc = gl.getAttribLocation(pointProgram, 'a_pointRadius');
      // Calculate radius in normalized coords
      const latRange = bounds.maxLat - bounds.minLat;
      const lonRange = bounds.maxLon - bounds.minLon;
      // Calculate radius: ensure smooth overlap between adjacent points
      // (assumes points form a roughly square grid — TODO confirm against producer)
      const gridDim = Math.sqrt(points.length);
      const avgCellLat = latRange / gridDim;
      const avgCellLon = lonRange / gridDim;
      // For smooth coverage we need each point's gradient to reach ~2 cells in every direction
      // Denser grids (more points) need relatively larger multiplier because edge effects matter more
      const baseMultiplier = 3.5;
      const densityBoost = Math.max(1.0, gridDim / 50); // 1.0 at 50pts, 1.6 at 80pts
      const radiusMultiplier = baseMultiplier * densityBoost;
      const normalizedRadiusLat = (avgCellLat * radiusMultiplier) / latRange;
      const normalizedRadiusLon = (avgCellLon * radiusMultiplier) / lonRange;
      const normalizedRadius = Math.max(normalizedRadiusLat, normalizedRadiusLon);
      const rsrpRange = maxRsrp - minRsrp;
      const instExt = instExtRef.current;
      const pointBuffer = pointBufferRef.current;
      if (instExt && pointBuffer) {
        // === INSTANCED RENDERING: 1 draw call for ALL points ===
        // Build instance data buffer: [posX, posY, rsrp, radius] × N points
        const instanceData = new Float32Array(points.length * 4);
        for (let i = 0; i < points.length; i++) {
          const p = points[i];
          const normX = (p.lon - bounds.minLon) / lonRange;
          const normY = (p.lat - bounds.minLat) / latRange;
          const normRsrp = Math.max(0, Math.min(1, (p.rsrp - minRsrp) / rsrpRange));
          instanceData[i * 4 + 0] = normX;
          instanceData[i * 4 + 1] = normY;
          instanceData[i * 4 + 2] = normRsrp;
          instanceData[i * 4 + 3] = normalizedRadius;
        }
        gl.bindBuffer(gl.ARRAY_BUFFER, pointBuffer);
        gl.bufferData(gl.ARRAY_BUFFER, instanceData, gl.DYNAMIC_DRAW);
        // Bind quad buffer for a_position (per-vertex)
        gl.bindBuffer(gl.ARRAY_BUFFER, quadBuffer);
        gl.enableVertexAttribArray(posLoc);
        gl.vertexAttribPointer(posLoc, 2, gl.FLOAT, false, 0, 0);
        // Bind instance buffer for per-instance attributes
        gl.bindBuffer(gl.ARRAY_BUFFER, pointBuffer);
        const stride = 4 * 4; // 4 floats × 4 bytes
        gl.enableVertexAttribArray(pointPosLoc);
        gl.vertexAttribPointer(pointPosLoc, 2, gl.FLOAT, false, stride, 0);
        instExt.vertexAttribDivisorANGLE(pointPosLoc, 1); // per-instance
        gl.enableVertexAttribArray(pointRsrpLoc);
        gl.vertexAttribPointer(pointRsrpLoc, 1, gl.FLOAT, false, stride, 8);
        instExt.vertexAttribDivisorANGLE(pointRsrpLoc, 1); // per-instance
        gl.enableVertexAttribArray(pointRadiusLoc);
        gl.vertexAttribPointer(pointRadiusLoc, 1, gl.FLOAT, false, stride, 12);
        instExt.vertexAttribDivisorANGLE(pointRadiusLoc, 1); // per-instance
        // ONE draw call for ALL points!
        instExt.drawArraysInstancedANGLE(gl.TRIANGLE_STRIP, 0, 4, points.length);
        // Reset divisors (divisor state is global, must not leak into pass 2)
        instExt.vertexAttribDivisorANGLE(pointPosLoc, 0);
        instExt.vertexAttribDivisorANGLE(pointRsrpLoc, 0);
        instExt.vertexAttribDivisorANGLE(pointRadiusLoc, 0);
        gl.disableVertexAttribArray(posLoc);
        gl.disableVertexAttribArray(pointPosLoc);
        gl.disableVertexAttribArray(pointRsrpLoc);
        gl.disableVertexAttribArray(pointRadiusLoc);
        const t1 = performance.now();
        log(2, 'Instanced render:', points.length, 'points in 1 call,', (t1 - t0).toFixed(1) + 'ms');
      } else {
        // === FALLBACK: per-point draw calls ===
        gl.bindBuffer(gl.ARRAY_BUFFER, quadBuffer);
        gl.enableVertexAttribArray(posLoc);
        gl.vertexAttribPointer(posLoc, 2, gl.FLOAT, false, 0, 0);
        for (const p of points) {
          const normX = (p.lon - bounds.minLon) / lonRange;
          const normY = (p.lat - bounds.minLat) / latRange;
          const normRsrp = Math.max(0, Math.min(1, (p.rsrp - minRsrp) / rsrpRange));
          gl.vertexAttrib2f(pointPosLoc, normX, normY);
          gl.vertexAttrib1f(pointRsrpLoc, normRsrp);
          gl.vertexAttrib1f(pointRadiusLoc, normalizedRadius);
          gl.drawArrays(gl.TRIANGLE_STRIP, 0, 4);
        }
        gl.disableVertexAttribArray(posLoc);
        const t1 = performance.now();
        log(2, 'Fallback render:', points.length, 'points in', points.length, 'calls,', (t1 - t0).toFixed(1) + 'ms');
      }
      log(3, 'Grid estimate:', { points: points.length, gridDim: gridDim.toFixed(1), densityBoost: densityBoost.toFixed(2), radiusMultiplier: radiusMultiplier.toFixed(1), normalizedRadius: normalizedRadius.toFixed(4) });
      needsPointRenderRef.current = false;
    }
    // === Pass 2: Composite to screen (always runs) ===
    gl.bindFramebuffer(gl.FRAMEBUFFER, null);
    gl.viewport(0, 0, canvas.width, canvas.height);
    gl.clearColor(0, 0, 0, 0);
    gl.clear(gl.COLOR_BUFFER_BIT);
    gl.useProgram(compositeProgram);
    gl.enable(gl.BLEND);
    gl.blendFunc(gl.SRC_ALPHA, gl.ONE_MINUS_SRC_ALPHA); // Normal blending
    // Bind quad buffer
    gl.bindBuffer(gl.ARRAY_BUFFER, quadBuffer);
    const compositePos = gl.getAttribLocation(compositeProgram, 'a_position');
    gl.enableVertexAttribArray(compositePos);
    gl.vertexAttribPointer(compositePos, 2, gl.FLOAT, false, 0, 0);
    // Bind accumulation texture
    gl.activeTexture(gl.TEXTURE0);
    gl.bindTexture(gl.TEXTURE_2D, accumTexture);
    gl.uniform1i(gl.getUniformLocation(compositeProgram, 'u_accumTexture'), 0);
    gl.uniform1f(gl.getUniformLocation(compositeProgram, 'u_opacity'), opacity);
    gl.drawArrays(gl.TRIANGLE_STRIP, 0, 4);
    gl.disableVertexAttribArray(compositePos);
  }, [map, points, minRsrp, maxRsrp, opacity]);
  // Effect 1: Initialize WebGL.
  // NOTE(review): resources are only released by the unmount cleanup below;
  // toggling `visible` off hides the canvas (Effect 4) but keeps GL state alive.
  useEffect(() => {
    if (!visible) return;
    if (initializedRef.current && canvasRef.current && glRef.current) return;
    const pane = map.getPane('overlayPane');
    if (!pane) return;
    // Remove any leftover canvas
    const existing = pane.querySelectorAll('canvas.webgl-radial-coverage');
    existing.forEach(c => c.remove());
    // Create canvas
    const canvas = document.createElement('canvas');
    canvas.className = 'webgl-radial-coverage';
    canvas.style.position = 'absolute';
    canvas.style.pointerEvents = 'none';
    canvas.style.transformOrigin = '0 0';
    pane.appendChild(canvas);
    canvasRef.current = canvas;
    // Initialize WebGL
    const gl = canvas.getContext('webgl', { alpha: true, premultipliedAlpha: false });
    if (!gl) {
      console.error('[WebGL Radial] WebGL not available');
      onWebGLFailedRef.current?.();
      return;
    }
    glRef.current = gl;
    // Check for float texture support (required for the accumulation target)
    const floatExt = gl.getExtension('OES_texture_float');
    gl.getExtension('OES_texture_float_linear'); // Enable if available
    if (!floatExt) {
      console.error('[WebGL Radial] OES_texture_float not supported');
      onWebGLFailedRef.current?.();
      return;
    }
    // Check for instanced rendering support
    const instExt = gl.getExtension('ANGLE_instanced_arrays');
    if (instExt) {
      log(2, 'Instanced rendering supported');
      instExtRef.current = instExt;
    } else {
      log(1, 'Instanced rendering NOT supported, using fallback');
    }
    gl.enable(gl.BLEND);
    // Create point program
    const pointProgram = createProgram(gl, POINT_VERTEX_SHADER, POINT_FRAGMENT_SHADER);
    if (!pointProgram) {
      console.error('[WebGL Radial] Failed to create point program');
      onWebGLFailedRef.current?.();
      return;
    }
    pointProgramRef.current = pointProgram;
    // Create composite program
    const compositeProgram = createProgram(gl, COMPOSITE_VERTEX_SHADER, COMPOSITE_FRAGMENT_SHADER);
    if (!compositeProgram) {
      console.error('[WebGL Radial] Failed to create composite program');
      onWebGLFailedRef.current?.();
      return;
    }
    compositeProgramRef.current = compositeProgram;
    // Create quad buffer (fullscreen quad)
    const quadBuffer = gl.createBuffer();
    gl.bindBuffer(gl.ARRAY_BUFFER, quadBuffer);
    gl.bufferData(gl.ARRAY_BUFFER, new Float32Array([
      -1, -1,
      1, -1,
      -1, 1,
      1, 1,
    ]), gl.STATIC_DRAW);
    quadBufferRef.current = quadBuffer;
    // Create point buffer (will be filled per-point for now, TODO: instancing)
    const pointBuffer = gl.createBuffer();
    pointBufferRef.current = pointBuffer;
    // Create accumulation texture (float RGBA)
    // Use NEAREST filtering - float textures require OES_texture_float_linear for LINEAR
    const accumTexture = gl.createTexture();
    gl.bindTexture(gl.TEXTURE_2D, accumTexture);
    gl.texImage2D(gl.TEXTURE_2D, 0, gl.RGBA, 1, 1, 0, gl.RGBA, gl.FLOAT, null);
    gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.NEAREST);
    gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MAG_FILTER, gl.NEAREST);
    gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_S, gl.CLAMP_TO_EDGE);
    gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_T, gl.CLAMP_TO_EDGE);
    accumTextureRef.current = accumTexture;
    // Create framebuffer
    const framebuffer = gl.createFramebuffer();
    gl.bindFramebuffer(gl.FRAMEBUFFER, framebuffer);
    gl.framebufferTexture2D(gl.FRAMEBUFFER, gl.COLOR_ATTACHMENT0, gl.TEXTURE_2D, accumTexture, 0);
    framebufferRef.current = framebuffer;
    // Check framebuffer status (also catches missing float-renderability)
    const status = gl.checkFramebufferStatus(gl.FRAMEBUFFER);
    if (status !== gl.FRAMEBUFFER_COMPLETE) {
      console.error('[WebGL Radial] Framebuffer not complete:', status);
      onWebGLFailedRef.current?.();
      return;
    }
    gl.bindFramebuffer(gl.FRAMEBUFFER, null);
    initializedRef.current = true;
  }, [visible, map]);
  // Effect 2: Update bounds when points change
  useEffect(() => {
    if (!visible || points.length === 0) return;
    if (pointsHash === lastPointsHashRef.current) return;
    const bounds = calculateBounds(points);
    if (!bounds) return;
    boundsRef.current = bounds;
    lastPointsHashRef.current = pointsHash;
    needsPointRenderRef.current = true; // Mark for point re-render
    render();
  }, [visible, points, pointsHash, calculateBounds, render]);
  // Effect 3: Map event listeners (re-composite on pan/zoom/resize,
  // throttled to one render per animation frame)
  useEffect(() => {
    if (!visible) return;
    let frameId = 0;
    const onMapChange = () => {
      cancelAnimationFrame(frameId);
      frameId = requestAnimationFrame(render);
    };
    map.on('move', onMapChange);
    map.on('zoom', onMapChange);
    map.on('resize', onMapChange);
    return () => {
      map.off('move', onMapChange);
      map.off('zoom', onMapChange);
      map.off('resize', onMapChange);
      cancelAnimationFrame(frameId);
    };
  }, [visible, map, render]);
  // Effect 4: Visibility toggle
  useEffect(() => {
    if (canvasRef.current) {
      canvasRef.current.style.display = visible ? 'block' : 'none';
    }
  }, [visible]);
  // Cleanup on unmount: release all GL objects and detach the canvas
  useEffect(() => {
    return () => {
      const gl = glRef.current;
      if (gl) {
        if (accumTextureRef.current) gl.deleteTexture(accumTextureRef.current);
        if (framebufferRef.current) gl.deleteFramebuffer(framebufferRef.current);
        if (quadBufferRef.current) gl.deleteBuffer(quadBufferRef.current);
        if (pointBufferRef.current) gl.deleteBuffer(pointBufferRef.current);
        if (pointProgramRef.current) gl.deleteProgram(pointProgramRef.current);
        if (compositeProgramRef.current) gl.deleteProgram(compositeProgramRef.current);
      }
      if (canvasRef.current) {
        canvasRef.current.remove();
        canvasRef.current = null;
      }
    };
  }, []);
  return null;
}

View File

@@ -0,0 +1,77 @@
/**
* Quick frequency band selector for setting all sectors at once.
* Enables rapid comparison of coverage at different frequency bands.
*/
import { useSitesStore } from '@/store/sites.ts';
import { COMMON_FREQUENCIES } from '@/constants/frequencies.ts';
// Preset frequency shortcuts rendered as one-click buttons.
// `freq` is in MHz, `label` is the compact button text, and `color` is the
// Tailwind text class used while the band is not the active selection.
const QUICK_BANDS = [
  { freq: 70, label: '70', color: 'text-indigo-400' },
  { freq: 225, label: '225', color: 'text-cyan-400' },
  { freq: 700, label: '700', color: 'text-red-400' },
  { freq: 800, label: '800', color: 'text-orange-400' },
  { freq: 900, label: '900', color: 'text-yellow-400' },
  { freq: 1800, label: '1.8G', color: 'text-green-400' },
  { freq: 2100, label: '2.1G', color: 'text-blue-400' },
  { freq: 2600, label: '2.6G', color: 'text-purple-400' },
  { freq: 3500, label: '3.5G', color: 'text-pink-400' },
];
/**
 * One-click frequency presets applied to every sector at once.
 *
 * Renders nothing when no sites exist. A band button is highlighted only
 * when every site already shares that frequency; otherwise the header
 * shows "Mixed".
 */
export default function BatchFrequencyChange() {
  const sites = useSitesStore((s) => s.sites);
  const setAllSitesFrequency = useSitesStore((s) => s.setAllSitesFrequency);

  if (sites.length === 0) return null;

  // Reference frequency taken from the first site (default 1800 MHz).
  const referenceFreq = sites[0]?.frequency ?? 1800;
  // True when every sector sits on the reference frequency.
  const uniform = sites.every((site) => site.frequency === referenceFreq);

  // Human-readable band name, falling back to the raw MHz value.
  const bandName = (freq: number): string => {
    const entry = COMMON_FREQUENCIES.find((band) => band.value === freq);
    return entry?.name ?? `${freq} MHz`;
  };

  // Fire-and-forget: the store action's promise result is not used here.
  const applyFrequency = (freq: number) => {
    void setAllSitesFrequency(freq);
  };

  return (
    <div className="p-3 border-t border-gray-200 dark:border-dark-border">
      <div className="flex items-center justify-between mb-2">
        <h4 className="text-xs font-semibold text-gray-500 dark:text-dark-muted uppercase">
          Quick Frequency
        </h4>
        <span className="text-[10px] text-gray-400 dark:text-dark-muted">
          {uniform ? bandName(referenceFreq) : 'Mixed'}
        </span>
      </div>
      <div className="flex flex-wrap gap-1">
        {QUICK_BANDS.map((band) => {
          const isActive = uniform && referenceFreq === band.freq;
          return (
            <button
              key={band.freq}
              onClick={() => applyFrequency(band.freq)}
              className={`px-2 py-1 text-xs rounded transition-colors ${
                isActive
                  ? 'bg-blue-100 text-blue-700 dark:bg-blue-900/30 dark:text-blue-300 ring-1 ring-blue-400'
                  : 'bg-gray-100 hover:bg-gray-200 dark:bg-dark-border dark:hover:bg-dark-muted text-gray-700 dark:text-dark-text'
              }`}
              title={`Set all sectors to ${band.freq} MHz (${bandName(band.freq)})`}
            >
              <span className={isActive ? '' : band.color}>{band.label}</span>
            </button>
          );
        })}
      </div>
      <div className="mt-1.5 text-[10px] text-gray-400 dark:text-dark-muted">
        Sets all {sites.length} sector{sites.length !== 1 ? 's' : ''} to selected band
      </div>
    </div>
  );
}

View File

@@ -3,6 +3,8 @@ import { useCalcHistoryStore } from '@/store/calcHistory.ts';
import type { CalculationEntry } from '@/store/calcHistory.ts'; import type { CalculationEntry } from '@/store/calcHistory.ts';
function EntryDetail({ entry }: { entry: CalculationEntry }) { function EntryDetail({ entry }: { entry: CalculationEntry }) {
const p = entry.propagation;
return ( return (
<div className="mt-1.5 pt-1.5 border-t border-gray-100 dark:border-dark-border space-y-1.5 text-[10px]"> <div className="mt-1.5 pt-1.5 border-t border-gray-100 dark:border-dark-border space-y-1.5 text-[10px]">
{/* Coverage breakdown with percentages */} {/* Coverage breakdown with percentages */}
@@ -38,6 +40,73 @@ function EntryDetail({ entry }: { entry: CalculationEntry }) {
<span>Avg RSRP: {entry.avgRsrp.toFixed(1)} dBm</span> <span>Avg RSRP: {entry.avgRsrp.toFixed(1)} dBm</span>
<span>Range: {entry.rangeMin.toFixed(0)} / {entry.rangeMax.toFixed(0)} dBm</span> <span>Range: {entry.rangeMin.toFixed(0)} / {entry.rangeMax.toFixed(0)} dBm</span>
</div> </div>
{/* Propagation details */}
{p && (
<div className="pt-1.5 border-t border-gray-100 dark:border-dark-border space-y-1">
{/* Site parameters */}
<div className="flex flex-wrap gap-x-3 gap-y-0.5 text-gray-500 dark:text-dark-muted">
<span>{p.frequency} MHz</span>
<span>{p.txPower} dBm</span>
<span>{p.antennaGain} dBi</span>
<span>{p.antennaHeight} m</span>
</div>
{/* Models used */}
{p.modelsUsed.length > 0 && (
<div className="flex flex-wrap gap-1">
{p.modelsUsed.map((model) => (
<span
key={model}
className="px-1 py-0.5 bg-gray-100 dark:bg-dark-border text-gray-600 dark:text-dark-muted rounded"
>
{model}
</span>
))}
</div>
)}
{/* Active toggles summary */}
<div className="flex flex-wrap gap-1">
{p.use_terrain && (
<span className="px-1 py-0.5 bg-green-50 dark:bg-green-900/20 text-green-700 dark:text-green-300 rounded">Terrain</span>
)}
{p.use_buildings && (
<span className="px-1 py-0.5 bg-green-50 dark:bg-green-900/20 text-green-700 dark:text-green-300 rounded">Buildings</span>
)}
{p.use_materials && (
<span className="px-1 py-0.5 bg-green-50 dark:bg-green-900/20 text-green-700 dark:text-green-300 rounded">Materials</span>
)}
{p.use_dominant_path && (
<span className="px-1 py-0.5 bg-green-50 dark:bg-green-900/20 text-green-700 dark:text-green-300 rounded">DomPath</span>
)}
{p.use_reflections && (
<span className="px-1 py-0.5 bg-green-50 dark:bg-green-900/20 text-green-700 dark:text-green-300 rounded">Reflections</span>
)}
{p.use_vegetation && (
<span className="px-1 py-0.5 bg-green-50 dark:bg-green-900/20 text-green-700 dark:text-green-300 rounded">Vegetation</span>
)}
{p.use_atmospheric && (
<span className="px-1 py-0.5 bg-green-50 dark:bg-green-900/20 text-green-700 dark:text-green-300 rounded">Atmospheric</span>
)}
{p.fading_margin > 0 && (
<span className="px-1 py-0.5 bg-orange-50 dark:bg-orange-900/20 text-orange-700 dark:text-orange-300 rounded">
-{p.fading_margin} dB fade
</span>
)}
{p.rain_rate > 0 && (
<span className="px-1 py-0.5 bg-blue-50 dark:bg-blue-900/20 text-blue-700 dark:text-blue-300 rounded">
Rain {p.rain_rate} mm/h
</span>
)}
{p.indoor_loss_type !== 'none' && (
<span className="px-1 py-0.5 bg-purple-50 dark:bg-purple-900/20 text-purple-700 dark:text-purple-300 rounded">
Indoor: {p.indoor_loss_type}
</span>
)}
</div>
</div>
)}
</div> </div>
); );
} }

View File

@@ -0,0 +1,361 @@
/**
* Link Budget Calculator Panel
*
* Shows complete RF link budget from transmitter to receiver:
* - TX: power, gain, cable loss, EIRP
* - Path: distance, FSPL, terrain loss
* - RX: gain, sensitivity, margin
*/
import { useState, useEffect } from 'react';
import { useSitesStore } from '@/store/sites.ts';
import { api } from '@/services/api.ts';
import type { LinkBudgetResponse } from '@/services/api.ts';
import Button from '@/components/ui/Button.tsx';
/** Props for the link-budget calculator panel. */
interface LinkBudgetPanelProps {
  /** Optional RX coordinates from map click */
  rxPoint?: { lat: number; lon: number } | null;
  /** Callback to enable map click mode */
  onRequestMapClick?: () => void;
  /** Callback when panel is closed */
  onClose?: () => void;
}
/**
 * Link-budget calculator panel: TX parameters come from the selected site
 * (with an overridable antenna height), RX parameters are entered manually
 * or set by a map click, and the backend computes distance/FSPL/terrain
 * loss and the resulting link margin.
 */
export default function LinkBudgetPanel({
  rxPoint,
  onRequestMapClick,
  onClose,
}: LinkBudgetPanelProps) {
  const sites = useSitesStore((s) => s.sites);
  const selectedSiteId = useSitesStore((s) => s.selectedSiteId);
  // TX parameters (from selected site or manual)
  const selectedSite = sites.find((s) => s.id === selectedSiteId);
  // TX height override for what-if scenarios (null = use site default)
  const [txHeightOverride, setTxHeightOverride] = useState<number | null>(null);
  const txHeight = txHeightOverride ?? selectedSite?.height ?? 30;
  // Reset height override when site changes
  useEffect(() => {
    setTxHeightOverride(null);
  }, [selectedSiteId]);
  // RX coordinates (kept as strings for free-form text editing)
  const [rxLat, setRxLat] = useState<string>(rxPoint?.lat?.toFixed(6) || '');
  const [rxLon, setRxLon] = useState<string>(rxPoint?.lon?.toFixed(6) || '');
  // Additional TX/RX parameters
  const [txCableLoss, setTxCableLoss] = useState<number>(2);
  const [rxGain, setRxGain] = useState<number>(0);
  const [rxCableLoss, setRxCableLoss] = useState<number>(0);
  const [rxSensitivity, setRxSensitivity] = useState<number>(-100);
  const [rxHeight, setRxHeight] = useState<number>(1.5);
  // Result
  const [result, setResult] = useState<LinkBudgetResponse | null>(null);
  const [loading, setLoading] = useState(false);
  const [error, setError] = useState<string | null>(null);
  // Update RX coordinates when rxPoint changes (e.g. user clicked the map)
  useEffect(() => {
    if (rxPoint) {
      setRxLat(rxPoint.lat.toFixed(6));
      setRxLon(rxPoint.lon.toFixed(6));
    }
  }, [rxPoint]);
  // Validate inputs and call the backend; errors are surfaced in the panel.
  const calculateLinkBudget = async () => {
    if (!selectedSite) {
      setError('Select a site first');
      return;
    }
    const rxLatNum = parseFloat(rxLat);
    const rxLonNum = parseFloat(rxLon);
    if (isNaN(rxLatNum) || isNaN(rxLonNum)) {
      setError('Enter valid RX coordinates');
      return;
    }
    setLoading(true);
    setError(null);
    try {
      const response = await api.calculateLinkBudget({
        tx_lat: selectedSite.lat,
        tx_lon: selectedSite.lon,
        tx_power_dbm: selectedSite.power,
        tx_gain_dbi: selectedSite.gain,
        tx_cable_loss_db: txCableLoss,
        tx_height_m: txHeight,
        rx_lat: rxLatNum,
        rx_lon: rxLonNum,
        rx_gain_dbi: rxGain,
        rx_cable_loss_db: rxCableLoss,
        rx_sensitivity_dbm: rxSensitivity,
        rx_height_m: rxHeight,
        frequency_mhz: selectedSite.frequency,
      });
      setResult(response);
    } catch (err) {
      setError(err instanceof Error ? err.message : 'Calculation failed');
    } finally {
      setLoading(false);
    }
  };
  // Traffic-light coloring: green >= 10 dB margin, yellow >= 0, red otherwise.
  const marginColor = result
    ? result.margin_db >= 10
      ? 'text-green-600 dark:text-green-400'
      : result.margin_db >= 0
        ? 'text-yellow-600 dark:text-yellow-400'
        : 'text-red-600 dark:text-red-400'
    : '';
  return (
    <div
      className="bg-white dark:bg-dark-surface border border-gray-200 dark:border-dark-border rounded-lg shadow-sm p-4 space-y-4 w-80"
      onClick={(e) => e.stopPropagation()}
      onMouseDown={(e) => e.stopPropagation()}
      onPointerDown={(e) => e.stopPropagation()}
    >
      {/* Header */}
      <div className="flex items-center justify-between">
        <h3 className="text-sm font-semibold text-gray-800 dark:text-dark-text flex items-center gap-2">
          <span className="text-lg">📡</span>
          Link Budget Calculator
        </h3>
        {/* NOTE(review): button label below appears empty — likely a lost
            close glyph (e.g. ✕); confirm against the original source. */}
        {onClose && (
          <button
            onClick={onClose}
            className="text-gray-400 hover:text-gray-600 dark:hover:text-white text-sm"
          >
          </button>
        )}
      </div>
      {/* TX Section */}
      <div className="space-y-2">
        <div className="text-xs font-medium text-gray-500 dark:text-dark-muted uppercase">
          Transmitter
        </div>
        {selectedSite ? (
          <div className="text-xs space-y-1 bg-gray-50 dark:bg-dark-bg p-2 rounded text-gray-700 dark:text-dark-text">
            <div className="flex justify-between">
              <span className="text-gray-500 dark:text-dark-muted">Site:</span>
              <span className="font-medium">{selectedSite.name}</span>
            </div>
            <div className="flex justify-between">
              <span className="text-gray-500 dark:text-dark-muted">Power:</span>
              <span>{selectedSite.power} dBm</span>
            </div>
            <div className="flex justify-between">
              <span className="text-gray-500 dark:text-dark-muted">Gain:</span>
              <span>{selectedSite.gain} dBi</span>
            </div>
            <div className="flex justify-between items-center">
              <span className="text-gray-500 dark:text-dark-muted">Height:</span>
              <div className="flex items-center">
                {/* NOTE(review): `|| 30` coerces an entered 0 (and NaN) to 30 m */}
                <input
                  type="number"
                  value={txHeight}
                  onChange={(e) => setTxHeightOverride(parseFloat(e.target.value) || 30)}
                  className="w-16 text-right text-xs px-1 py-0.5 border rounded dark:bg-dark-bg dark:border-dark-border dark:text-dark-text"
                  min="1"
                  max="300"
                  step="1"
                />
                <span className="text-gray-400 dark:text-dark-muted ml-1">m</span>
              </div>
            </div>
            <div className="flex justify-between">
              <span className="text-gray-500 dark:text-dark-muted">Frequency:</span>
              <span>{selectedSite.frequency} MHz</span>
            </div>
            <div className="flex justify-between items-center">
              <span className="text-gray-500 dark:text-dark-muted">Cable Loss:</span>
              <input
                type="number"
                value={txCableLoss}
                onChange={(e) => setTxCableLoss(parseFloat(e.target.value) || 0)}
                className="w-16 text-right text-xs px-1 py-0.5 border rounded dark:bg-dark-bg dark:border-dark-border dark:text-dark-text"
                step="0.5"
              />
              <span className="text-gray-400 dark:text-dark-muted ml-1">dB</span>
            </div>
          </div>
        ) : (
          <div className="text-xs text-gray-400 dark:text-dark-muted italic">Select a site on the map</div>
        )}
      </div>
      {/* RX Section */}
      <div className="space-y-2">
        <div className="text-xs font-medium text-gray-500 dark:text-dark-muted uppercase">
          Receiver
        </div>
        <div className="grid grid-cols-2 gap-2">
          <div>
            <label className="text-[10px] text-gray-400 dark:text-dark-muted">Latitude</label>
            <input
              type="text"
              value={rxLat}
              onChange={(e) => setRxLat(e.target.value)}
              placeholder="48.4500"
              className="w-full text-xs px-2 py-1 border rounded dark:bg-dark-bg dark:border-dark-border text-gray-800 dark:text-dark-text"
            />
          </div>
          <div>
            <label className="text-[10px] text-gray-400 dark:text-dark-muted">Longitude</label>
            <input
              type="text"
              value={rxLon}
              onChange={(e) => setRxLon(e.target.value)}
              placeholder="35.0400"
              className="w-full text-xs px-2 py-1 border rounded dark:bg-dark-bg dark:border-dark-border text-gray-800 dark:text-dark-text"
            />
          </div>
        </div>
        {onRequestMapClick && (
          <Button size="sm" variant="secondary" onClick={onRequestMapClick} className="w-full">
            📍 Click on Map to Set RX Point
          </Button>
        )}
        <div className="grid grid-cols-2 gap-2 text-xs">
          <div>
            <label className="text-[10px] text-gray-400 dark:text-dark-muted">RX Gain (dBi)</label>
            <input
              type="number"
              value={rxGain}
              onChange={(e) => setRxGain(parseFloat(e.target.value) || 0)}
              className="w-full px-2 py-1 border rounded dark:bg-dark-bg dark:border-dark-border text-gray-800 dark:text-dark-text"
            />
          </div>
          <div>
            <label className="text-[10px] text-gray-400 dark:text-dark-muted">RX Height (m)</label>
            <input
              type="number"
              value={rxHeight}
              onChange={(e) => setRxHeight(parseFloat(e.target.value) || 1.5)}
              className="w-full px-2 py-1 border rounded dark:bg-dark-bg dark:border-dark-border text-gray-800 dark:text-dark-text"
            />
          </div>
          <div>
            <label className="text-[10px] text-gray-400 dark:text-dark-muted">Sensitivity (dBm)</label>
            <input
              type="number"
              value={rxSensitivity}
              onChange={(e) => setRxSensitivity(parseFloat(e.target.value) || -100)}
              className="w-full px-2 py-1 border rounded dark:bg-dark-bg dark:border-dark-border text-gray-800 dark:text-dark-text"
            />
          </div>
          <div>
            <label className="text-[10px] text-gray-400 dark:text-dark-muted">Cable Loss (dB)</label>
            <input
              type="number"
              value={rxCableLoss}
              onChange={(e) => setRxCableLoss(parseFloat(e.target.value) || 0)}
              className="w-full px-2 py-1 border rounded dark:bg-dark-bg dark:border-dark-border text-gray-800 dark:text-dark-text"
            />
          </div>
        </div>
      </div>
      {/* Calculate Button */}
      <Button
        onClick={calculateLinkBudget}
        disabled={loading || !selectedSite}
        className="w-full"
      >
        {loading ? 'Calculating...' : 'Calculate Link Budget'}
      </Button>
      {/* Error */}
      {error && (
        <div className="text-xs text-red-500 bg-red-50 dark:bg-red-900/20 p-2 rounded">
          {error}
        </div>
      )}
      {/* Results */}
      {result && (
        <div className="space-y-2 border-t pt-3 dark:border-dark-border">
          <div className="text-xs font-medium text-gray-500 dark:text-dark-muted uppercase">
            Results
          </div>
          {/* Path Info */}
          <div className="text-xs space-y-1 bg-gray-50 dark:bg-dark-bg p-2 rounded text-gray-700 dark:text-dark-text">
            <div className="flex justify-between">
              <span className="text-gray-500 dark:text-dark-muted">Distance:</span>
              <span className="font-medium">{result.distance_km.toFixed(2)} km</span>
            </div>
            <div className="flex justify-between">
              <span className="text-gray-500 dark:text-dark-muted">LOS:</span>
              <span className={result.los_clear ? 'text-green-600 dark:text-green-400' : 'text-red-500 dark:text-red-400'}>
                {result.los_clear ? '✓ Clear' : '✗ Blocked'}
              </span>
            </div>
          </div>
          {/* Link Budget Table */}
          <div className="text-xs space-y-1 bg-blue-50 dark:bg-blue-900/20 p-2 rounded text-gray-700 dark:text-dark-text">
            <div className="flex justify-between">
              <span>EIRP:</span>
              <span className="font-mono">{result.eirp_dbm.toFixed(1)} dBm</span>
            </div>
            <div className="flex justify-between text-gray-500 dark:text-dark-muted">
              <span>- FSPL:</span>
              <span className="font-mono">{result.fspl_db.toFixed(1)} dB</span>
            </div>
            <div className="flex justify-between text-gray-500 dark:text-dark-muted">
              <span>- Terrain Loss:</span>
              <span className="font-mono">{result.terrain_loss_db.toFixed(1)} dB</span>
            </div>
            <div className="flex justify-between border-t pt-1 dark:border-dark-border">
              <span>= Total Path Loss:</span>
              <span className="font-mono font-medium">{result.total_path_loss_db.toFixed(1)} dB</span>
            </div>
          </div>
          {/* Final Result */}
          <div className="text-xs space-y-1 bg-gray-100 dark:bg-dark-border p-2 rounded text-gray-700 dark:text-dark-text">
            <div className="flex justify-between">
              <span>Received Power:</span>
              <span className="font-mono font-medium">{result.rx_power_dbm.toFixed(1)} dBm</span>
            </div>
            <div className="flex justify-between">
              <span>RX Sensitivity:</span>
              <span className="font-mono">{rxSensitivity} dBm</span>
            </div>
            <div className={`flex justify-between font-bold ${marginColor}`}>
              <span>Link Margin:</span>
              <span className="font-mono">{result.margin_db.toFixed(1)} dB</span>
            </div>
            <div className={`text-center text-sm font-bold mt-2 ${marginColor}`}>
              {result.status === 'OK' ? '✓ LINK OK' : '✗ LINK FAIL'}
            </div>
          </div>
          {/* Obstructions (first 3 shown, remainder summarized) */}
          {/* NOTE(review): header text starts with a space — possibly a lost
              warning glyph (e.g. ⚠); confirm against the original source. */}
          {result.obstructions && result.obstructions.length > 0 && (
            <div className="text-xs text-orange-600 dark:text-orange-400 bg-orange-50 dark:bg-orange-900/20 p-2 rounded">
              <div className="font-medium mb-1"> Terrain Obstructions:</div>
              {result.obstructions.slice(0, 3).map((obs, i) => (
                <div key={i}>
                  @ {(obs.distance_m / 1000).toFixed(2)} km: +{obs.height_above_los_m.toFixed(1)} m above LOS
                </div>
              ))}
              {result.obstructions.length > 3 && (
                <div className="text-gray-500">...and {result.obstructions.length - 3} more</div>
              )}
            </div>
          )}
        </div>
      )}
    </div>
  );
}

View File

@@ -1,6 +1,7 @@
import { useState, useCallback, useMemo } from 'react'; import { useState, useCallback, useMemo } from 'react';
import type { Site } from '@/types/index.ts'; import type { Site } from '@/types/index.ts';
import { useSitesStore } from '@/store/sites.ts'; import { useSitesStore } from '@/store/sites.ts';
import { useToolStore } from '@/store/tools.ts';
import { useToastStore } from '@/components/ui/Toast.tsx'; import { useToastStore } from '@/components/ui/Toast.tsx';
import Button from '@/components/ui/Button.tsx'; import Button from '@/components/ui/Button.tsx';
import ConfirmDialog from '@/components/ui/ConfirmDialog.tsx'; import ConfirmDialog from '@/components/ui/ConfirmDialog.tsx';
@@ -75,9 +76,20 @@ export default function SiteList({ onEditSite, onAddSite }: SiteListProps) {
const deleteSite = useSitesStore((s) => s.deleteSite); const deleteSite = useSitesStore((s) => s.deleteSite);
const selectSite = useSitesStore((s) => s.selectSite); const selectSite = useSitesStore((s) => s.selectSite);
const selectedSiteId = useSitesStore((s) => s.selectedSiteId); const selectedSiteId = useSitesStore((s) => s.selectedSiteId);
const isPlacingMode = useSitesStore((s) => s.isPlacingMode);
const togglePlacingMode = useSitesStore((s) => s.togglePlacingMode);
const selectedSiteIds = useSitesStore((s) => s.selectedSiteIds); const selectedSiteIds = useSitesStore((s) => s.selectedSiteIds);
// Tool store for site placement mode
const activeTool = useToolStore((s) => s.activeTool);
const setActiveTool = useToolStore((s) => s.setActiveTool);
const clearTool = useToolStore((s) => s.clearTool);
const isPlacingMode = activeTool === 'site-placement';
const togglePlacingMode = useCallback(() => {
if (isPlacingMode) {
clearTool();
} else {
setActiveTool('site-placement');
}
}, [isPlacingMode, setActiveTool, clearTool]);
const toggleSiteSelection = useSitesStore((s) => s.toggleSiteSelection); const toggleSiteSelection = useSitesStore((s) => s.toggleSiteSelection);
const selectAllSites = useSitesStore((s) => s.selectAllSites); const selectAllSites = useSitesStore((s) => s.selectAllSites);
const clearSelection = useSitesStore((s) => s.clearSelection); const clearSelection = useSitesStore((s) => s.clearSelection);

View File

@@ -1,6 +1,7 @@
/** /**
* Small header badge showing the active compute backend (CPU or GPU). * Small header badge showing the active compute backend (CPU or GPU).
* Fetches status on mount. Clicking opens a dropdown to switch devices. * Fetches status on mount. Clicking opens a dropdown to switch devices.
* Dropdown opens to the LEFT to avoid overlapping map controls.
*/ */
import { useState, useEffect, useRef } from 'react'; import { useState, useEffect, useRef } from 'react';
@@ -11,6 +12,8 @@ export default function GPUIndicator() {
const [status, setStatus] = useState<GPUStatus | null>(null); const [status, setStatus] = useState<GPUStatus | null>(null);
const [open, setOpen] = useState(false); const [open, setOpen] = useState(false);
const [switching, setSwitching] = useState(false); const [switching, setSwitching] = useState(false);
const [diagnostics, setDiagnostics] = useState<Record<string, unknown> | null>(null);
const [showDiag, setShowDiag] = useState(false);
const ref = useRef<HTMLDivElement>(null); const ref = useRef<HTMLDivElement>(null);
useEffect(() => { useEffect(() => {
@@ -23,16 +26,24 @@ export default function GPUIndicator() {
const handler = (e: MouseEvent) => { const handler = (e: MouseEvent) => {
if (ref.current && !ref.current.contains(e.target as Node)) { if (ref.current && !ref.current.contains(e.target as Node)) {
setOpen(false); setOpen(false);
setShowDiag(false);
} }
}; };
document.addEventListener('mousedown', handler); document.addEventListener('mousedown', handler);
return () => document.removeEventListener('mousedown', handler); return () => document.removeEventListener('mousedown', handler);
}, [open]); }, [open]);
// Auto-fetch diagnostics when dropdown opens and only CPU available
useEffect(() => {
if (open && status?.active_backend === 'cpu' && !diagnostics) {
api.getGPUDiagnostics().then(setDiagnostics).catch(() => {});
}
}, [open, status?.active_backend, diagnostics]);
if (!status) return null; if (!status) return null;
const isGPU = status.active_backend !== 'cpu'; const isGPU = status.active_backend !== 'cpu';
// Short label for header badge // Short label: just "CPU" or first word of GPU name
const label = isGPU const label = isGPU
? (status.active_device?.name?.split(' ')[0] ?? 'GPU') ? (status.active_device?.name?.split(' ')[0] ?? 'GPU')
: 'CPU'; : 'CPU';
@@ -50,6 +61,16 @@ export default function GPUIndicator() {
setOpen(false); setOpen(false);
}; };
const handleRunDiagnostics = async () => {
try {
const diag = await api.getGPUDiagnostics();
setDiagnostics(diag);
setShowDiag(true);
} catch {
// ignore
}
};
return ( return (
<div ref={ref} className="relative"> <div ref={ref} className="relative">
<button <button
@@ -59,13 +80,13 @@ export default function GPUIndicator() {
? 'bg-green-100 text-green-700 hover:bg-green-200 dark:bg-green-900/30 dark:text-green-300 dark:hover:bg-green-900/50' ? 'bg-green-100 text-green-700 hover:bg-green-200 dark:bg-green-900/30 dark:text-green-300 dark:hover:bg-green-900/50'
: 'bg-gray-100 text-gray-600 hover:bg-gray-200 dark:bg-dark-border dark:text-dark-muted dark:hover:bg-dark-muted' : 'bg-gray-100 text-gray-600 hover:bg-gray-200 dark:bg-dark-border dark:text-dark-muted dark:hover:bg-dark-muted'
}`} }`}
title={`Compute: ${label}`} title={`Compute: ${status.active_device?.name ?? label}`}
> >
{isGPU ? '\u26A1' : '\u2699\uFE0F'} {label} {isGPU ? '\u26A1' : '\u2699\uFE0F'} {label}
</button> </button>
{open && ( {open && (
<div className="absolute top-full right-0 mt-1 w-56 bg-white dark:bg-dark-surface border border-gray-200 dark:border-dark-border rounded-lg shadow-lg z-50 py-1"> <div className="absolute top-full left-0 mt-1 w-64 bg-white dark:bg-dark-surface border border-gray-200 dark:border-dark-border rounded-lg shadow-lg z-[9999] py-1">
<div className="px-3 py-1.5 text-[10px] font-semibold text-gray-400 dark:text-dark-muted uppercase"> <div className="px-3 py-1.5 text-[10px] font-semibold text-gray-400 dark:text-dark-muted uppercase">
Compute Devices Compute Devices
</div> </div>
@@ -98,6 +119,52 @@ export default function GPUIndicator() {
</button> </button>
); );
})} })}
{/* Show help when only CPU available */}
{status.available_devices.length === 1 && status.active_backend === 'cpu' && (
<div className="border-t border-gray-100 dark:border-dark-border mt-1 pt-2 px-3 pb-2">
<div className="text-[10px] text-yellow-600 dark:text-yellow-400 mb-2">
No GPU detected. For faster calculations:
</div>
{diagnostics?.is_wsl ? (
<div className="text-[10px] text-gray-500 dark:text-dark-muted space-y-1">
<div className="text-[9px] text-gray-400 dark:text-dark-muted mb-1">WSL2 detected - use pip3:</div>
<div className="bg-gray-100 dark:bg-dark-border px-2 py-1 rounded font-mono text-[9px] break-all">
pip3 install cupy-cuda12x --break-system-packages
</div>
<div className="text-[9px] text-gray-400 dark:text-dark-muted mt-1">Then restart RFCP</div>
</div>
) : (
<div className="text-[10px] text-gray-500 dark:text-dark-muted space-y-0.5">
<div>NVIDIA: <code className="bg-gray-100 dark:bg-dark-border px-1 rounded">pip install cupy-cuda12x</code></div>
<div>Intel/AMD: <code className="bg-gray-100 dark:bg-dark-border px-1 rounded">pip install pyopencl</code></div>
</div>
)}
{typeof diagnostics?.nvidia_smi === 'string' && diagnostics.nvidia_smi !== 'not found or error' && (
<div className="mt-2 text-[9px] text-green-600 dark:text-green-400">
GPU hardware found: {diagnostics.nvidia_smi.split(',')[0]}
</div>
)}
<button
onClick={handleRunDiagnostics}
className="mt-2 w-full text-[10px] text-blue-600 dark:text-blue-400 hover:underline text-left"
>
{diagnostics ? 'Refresh Diagnostics' : 'Run Diagnostics'}
</button>
</div>
)}
{/* Diagnostics output */}
{showDiag && diagnostics && (
<div className="border-t border-gray-100 dark:border-dark-border mt-1 pt-2 px-3 pb-2 max-h-48 overflow-y-auto">
<div className="text-[10px] font-semibold text-gray-500 dark:text-dark-muted mb-1">
Diagnostics
</div>
<pre className="text-[9px] text-gray-600 dark:text-gray-400 whitespace-pre-wrap break-all">
{JSON.stringify(diagnostics, null, 2)}
</pre>
</div>
)}
</div> </div>
)} )}
</div> </div>

View File

@@ -2,6 +2,7 @@ import { useEffect } from 'react';
import { useSitesStore } from '@/store/sites.ts'; import { useSitesStore } from '@/store/sites.ts';
import { useCoverageStore } from '@/store/coverage.ts'; import { useCoverageStore } from '@/store/coverage.ts';
import { useSettingsStore } from '@/store/settings.ts'; import { useSettingsStore } from '@/store/settings.ts';
import { useToolStore } from '@/store/tools.ts';
import { useToastStore } from '@/components/ui/Toast.tsx'; import { useToastStore } from '@/components/ui/Toast.tsx';
interface ShortcutHandlers { interface ShortcutHandlers {
@@ -63,7 +64,7 @@ export function useKeyboardShortcuts({
// Escape always works // Escape always works
if (e.key === 'Escape') { if (e.key === 'Escape') {
useSitesStore.getState().selectSite(null); useSitesStore.getState().selectSite(null);
useSitesStore.getState().setPlacingMode(false); useToolStore.getState().clearTool();
onCloseForm(); onCloseForm();
return; return;
} }
@@ -76,7 +77,7 @@ export function useKeyboardShortcuts({
switch (e.key.toUpperCase()) { switch (e.key.toUpperCase()) {
case 'S': // Shift+S: New site (place mode) case 'S': // Shift+S: New site (place mode)
e.preventDefault(); e.preventDefault();
useSitesStore.getState().setPlacingMode(true); useToolStore.getState().setActiveTool('site-placement');
useToastStore.getState().addToast('Click on map to place new site', 'info'); useToastStore.getState().addToast('Click on map to place new site', 'info');
return; return;
case 'C': // Shift+C: Clear coverage case 'C': // Shift+C: Clear coverage

View File

@@ -35,6 +35,31 @@
width: 100%; width: 100%;
height: 100%; height: 100%;
z-index: 0; z-index: 0;
cursor: default !important;
}
/* Remove grab cursor from interactive layers */
.leaflet-interactive {
cursor: default !important;
}
/* Grabbing only when actually dragging */
.leaflet-container.leaflet-dragging,
.leaflet-container:active {
cursor: grabbing !important;
}
/* Tool-specific cursors (applied via JS class toggle) */
.leaflet-container.tool-ruler {
cursor: crosshair !important;
}
.leaflet-container.tool-rx-placement {
cursor: crosshair !important;
}
.leaflet-container.tool-site-placement {
cursor: cell !important;
} }
/* Dark mode map tiles (invert brightness slightly) */ /* Dark mode map tiles (invert brightness slightly) */

View File

@@ -75,6 +75,11 @@ export interface ApiCoverageStats {
points_with_atmospheric_loss: number; points_with_atmospheric_loss: number;
} }
export interface ApiBoundaryPoint {
lat: number;
lon: number;
}
export interface CoverageResponse { export interface CoverageResponse {
points: ApiCoveragePoint[]; points: ApiCoveragePoint[];
count: number; count: number;
@@ -82,6 +87,7 @@ export interface CoverageResponse {
stats: ApiCoverageStats; stats: ApiCoverageStats;
computation_time: number; computation_time: number;
models_used: string[]; models_used: string[];
boundary?: ApiBoundaryPoint[];
} }
export interface Preset { export interface Preset {
@@ -240,6 +246,12 @@ class ApiService {
return response.json(); return response.json();
} }
async getGPUDiagnostics(): Promise<Record<string, unknown>> {
const response = await fetch(`${API_BASE}/api/gpu/diagnostics`);
if (!response.ok) throw new Error('Failed to get GPU diagnostics');
return response.json();
}
// === Terrain Profile API === // === Terrain Profile API ===
async getTerrainProfile( async getTerrainProfile(
@@ -259,6 +271,51 @@ class ApiService {
const data = await response.json(); const data = await response.json();
return data.profile ?? data; return data.profile ?? data;
} }
// === Link Budget API ===
async calculateLinkBudget(request: LinkBudgetRequest): Promise<LinkBudgetResponse> {
const response = await fetch(`${API_BASE}/api/coverage/link-budget`, {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify(request),
});
if (!response.ok) {
const error = await response.json().catch(() => ({ detail: 'Link budget calculation failed' }));
throw new Error(error.detail || 'Link budget calculation failed');
}
return response.json();
}
// === Fresnel Profile API ===
async getFresnelProfile(request: FresnelProfileRequest): Promise<FresnelProfileResponse> {
const response = await fetch(`${API_BASE}/api/coverage/fresnel-profile`, {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify(request),
});
if (!response.ok) {
const error = await response.json().catch(() => ({ detail: 'Fresnel profile calculation failed' }));
throw new Error(error.detail || 'Fresnel profile calculation failed');
}
return response.json();
}
// === Interference API ===
async calculateInterference(request: CoverageRequest): Promise<InterferenceResponse> {
const response = await fetch(`${API_BASE}/api/coverage/interference`, {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify(request),
});
if (!response.ok) {
const error = await response.json().catch(() => ({ detail: 'Interference calculation failed' }));
throw new Error(error.detail || 'Interference calculation failed');
}
return response.json();
}
} }
// === Region types === // === Region types ===
@@ -316,4 +373,113 @@ export interface TerrainProfilePoint {
distance: number; distance: number;
} }
// === Link Budget types ===
export interface LinkBudgetRequest {
tx_lat: number;
tx_lon: number;
tx_power_dbm: number;
tx_gain_dbi: number;
tx_cable_loss_db: number;
tx_height_m: number;
rx_lat: number;
rx_lon: number;
rx_gain_dbi: number;
rx_cable_loss_db: number;
rx_sensitivity_dbm: number;
rx_height_m: number;
frequency_mhz: number;
}
export interface LinkBudgetResponse {
distance_km: number;
distance_m: number;
tx_elevation_m: number;
rx_elevation_m: number;
eirp_dbm: number;
fspl_db: number;
terrain_loss_db: number;
total_path_loss_db: number;
los_clear: boolean;
obstructions: { distance_m: number; height_above_los_m: number }[];
rx_power_dbm: number;
margin_db: number;
status: 'OK' | 'FAIL';
link_budget: {
tx_power_dbm: number;
tx_gain_dbi: number;
tx_cable_loss_db: number;
rx_gain_dbi: number;
rx_cable_loss_db: number;
rx_sensitivity_dbm: number;
};
}
// === Fresnel Profile types ===
export interface FresnelProfileRequest {
tx_lat: number;
tx_lon: number;
tx_height_m: number;
rx_lat: number;
rx_lon: number;
rx_height_m: number;
frequency_mhz: number;
num_points?: number;
}
export interface FresnelProfilePoint {
distance: number;
lat: number;
lon: number;
terrain_elevation: number;
los_height: number;
fresnel_top: number;
fresnel_bottom: number;
f1_radius: number;
clearance: number;
}
export interface FresnelProfileResponse {
profile: FresnelProfilePoint[];
total_distance_m: number;
tx_elevation: number;
rx_elevation: number;
frequency_mhz: number;
wavelength_m: number;
los_clear: boolean;
fresnel_clear: boolean;
fresnel_clear_pct: number;
worst_clearance_m: number;
estimated_loss_db: number;
recommendation: string;
}
// === Interference types ===
export interface InterferencePoint {
lat: number;
lon: number;
ci_ratio_db: number;
best_server_idx: number;
best_server_rsrp: number;
}
export interface InterferenceResponse {
points: InterferencePoint[];
count: number;
stats: {
min_ci_db: number;
max_ci_db: number;
avg_ci_db: number;
good_coverage_pct: number;
marginal_coverage_pct: number;
interference_dominant_pct: number;
};
computation_time: number;
sites: { name: string; frequency_mhz: number }[];
frequency_groups: Record<number, number>;
warning: string | null;
}
export const api = new ApiService(); export const api = new ApiService();

View File

@@ -1,5 +1,29 @@
import { create } from 'zustand'; import { create } from 'zustand';
export interface PropagationSnapshot {
// Models used
modelsUsed: string[];
use_terrain: boolean;
use_buildings: boolean;
use_materials: boolean;
use_dominant_path: boolean;
use_street_canyon: boolean;
use_reflections: boolean;
use_water_reflection: boolean;
use_vegetation: boolean;
use_atmospheric: boolean;
// Site params (first site or average)
frequency: number;
txPower: number;
antennaGain: number;
antennaHeight: number;
// Environmental
season: string;
rain_rate: number;
indoor_loss_type: string;
fading_margin: number;
}
export interface CalculationEntry { export interface CalculationEntry {
id: string; id: string;
timestamp: Date; timestamp: Date;
@@ -12,6 +36,8 @@ export interface CalculationEntry {
avgRsrp: number; avgRsrp: number;
rangeMin: number; rangeMin: number;
rangeMax: number; rangeMax: number;
// Propagation snapshot for detailed history
propagation?: PropagationSnapshot;
} }
interface CalcHistoryState { interface CalcHistoryState {

View File

@@ -5,7 +5,7 @@ import type { WSProgress } from '@/services/websocket.ts';
import { useSitesStore } from '@/store/sites.ts'; import { useSitesStore } from '@/store/sites.ts';
import { useToastStore } from '@/components/ui/Toast.tsx'; import { useToastStore } from '@/components/ui/Toast.tsx';
import { useCalcHistoryStore } from '@/store/calcHistory.ts'; import { useCalcHistoryStore } from '@/store/calcHistory.ts';
import type { CalculationEntry } from '@/store/calcHistory.ts'; import type { CalculationEntry, PropagationSnapshot } from '@/store/calcHistory.ts';
import type { CoverageResult, CoverageSettings, CoverageApiStats } from '@/types/index.ts'; import type { CoverageResult, CoverageSettings, CoverageApiStats } from '@/types/index.ts';
import type { ApiSiteParams, CoverageResponse } from '@/services/api.ts'; import type { ApiSiteParams, CoverageResponse } from '@/services/api.ts';
@@ -98,6 +98,7 @@ function responseToResult(response: CoverageResponse, settings: CoverageSettings
settings: settings, settings: settings,
stats: response.stats as CoverageApiStats, stats: response.stats as CoverageApiStats,
modelsUsed: response.models_used, modelsUsed: response.models_used,
boundary: response.boundary,
}; };
} }
@@ -119,6 +120,32 @@ function buildHistoryEntry(result: CoverageResult): CalculationEntry {
const avgRsrp = result.stats?.avg_rsrp const avgRsrp = result.stats?.avg_rsrp
?? (total > 0 ? result.points.reduce((s, p) => s + p.rsrp, 0) / total : 0); ?? (total > 0 ? result.points.reduce((s, p) => s + p.rsrp, 0) / total : 0);
// Capture propagation snapshot from settings + sites
const sites = useSitesStore.getState().sites.filter((s) => s.visible);
const firstSite = sites[0];
const settings = result.settings;
const propagation: PropagationSnapshot = {
modelsUsed: result.modelsUsed ?? [],
use_terrain: settings.use_terrain ?? true,
use_buildings: settings.use_buildings ?? true,
use_materials: settings.use_materials ?? true,
use_dominant_path: settings.use_dominant_path ?? false,
use_street_canyon: settings.use_street_canyon ?? false,
use_reflections: settings.use_reflections ?? false,
use_water_reflection: settings.use_water_reflection ?? false,
use_vegetation: settings.use_vegetation ?? false,
use_atmospheric: settings.use_atmospheric ?? false,
frequency: firstSite?.frequency ?? 1800,
txPower: firstSite?.power ?? 43,
antennaGain: firstSite?.gain ?? 18,
antennaHeight: firstSite?.height ?? 30,
season: settings.season ?? 'summer',
rain_rate: settings.rain_rate ?? 0,
indoor_loss_type: settings.indoor_loss_type ?? 'none',
fading_margin: settings.fading_margin ?? 0,
};
return { return {
id: crypto.randomUUID(), id: crypto.randomUUID(),
timestamp: new Date(), timestamp: new Date(),
@@ -136,6 +163,7 @@ function buildHistoryEntry(result: CoverageResult): CalculationEntry {
avgRsrp, avgRsrp,
rangeMin: minRsrp === Infinity ? 0 : minRsrp, rangeMin: minRsrp === Infinity ? 0 : minRsrp,
rangeMax: maxRsrp === -Infinity ? 0 : maxRsrp, rangeMax: maxRsrp === -Infinity ? 0 : maxRsrp,
propagation,
}; };
} }
@@ -190,6 +218,12 @@ export const useCoverageStore = create<CoverageState>((set, get) => ({
setError: (error) => set({ error }), setError: (error) => set({ error }),
calculateCoverage: async () => { calculateCoverage: async () => {
// Guard against duplicate calculations
if (get().isCalculating) {
console.warn('[Coverage] Calculation already in progress, ignoring duplicate request');
return;
}
const { settings } = get(); const { settings } = get();
const sites = useSitesStore.getState().sites; const sites = useSitesStore.getState().sites;
@@ -224,11 +258,14 @@ export const useCoverageStore = create<CoverageState>((set, get) => ({
addToast('No coverage points. Try increasing radius.', 'warning'); addToast('No coverage points. Try increasing radius.', 'warning');
} else { } else {
const timeStr = result.calculationTime.toFixed(1); const timeStr = result.calculationTime.toFixed(1);
const firstSite = useSitesStore.getState().sites.find((s) => s.visible);
const freqStr = firstSite ? ` \u2022 ${firstSite.frequency} MHz` : '';
const presetStr = settings.preset ? ` \u2022 ${settings.preset}` : '';
const modelsStr = result.modelsUsed?.length const modelsStr = result.modelsUsed?.length
? ` \u2022 ${result.modelsUsed.length} models` ? ` \u2022 ${result.modelsUsed.length} models`
: ''; : '';
addToast( addToast(
`Calculated ${result.totalPoints.toLocaleString()} points in ${timeStr}s${modelsStr}`, `${result.totalPoints.toLocaleString()} pts \u2022 ${timeStr}s${presetStr}${freqStr}${modelsStr}`,
'success' 'success'
); );
} }

View File

@@ -3,6 +3,8 @@ import { persist } from 'zustand/middleware';
type Theme = 'light' | 'dark' | 'system'; type Theme = 'light' | 'dark' | 'system';
type CoverageRenderer = 'webgl-texture' | 'webgl-radial' | 'canvas';
interface SettingsState { interface SettingsState {
theme: Theme; theme: Theme;
showTerrain: boolean; showTerrain: boolean;
@@ -13,6 +15,8 @@ interface SettingsState {
showBoundary: boolean; showBoundary: boolean;
showElevationOverlay: boolean; showElevationOverlay: boolean;
elevationOpacity: number; elevationOpacity: number;
useWebGLCoverage: boolean;
coverageRenderer: CoverageRenderer;
setTheme: (theme: Theme) => void; setTheme: (theme: Theme) => void;
setShowBoundary: (show: boolean) => void; setShowBoundary: (show: boolean) => void;
setShowTerrain: (show: boolean) => void; setShowTerrain: (show: boolean) => void;
@@ -22,6 +26,8 @@ interface SettingsState {
setShowElevationInfo: (show: boolean) => void; setShowElevationInfo: (show: boolean) => void;
setShowElevationOverlay: (show: boolean) => void; setShowElevationOverlay: (show: boolean) => void;
setElevationOpacity: (opacity: number) => void; setElevationOpacity: (opacity: number) => void;
setUseWebGLCoverage: (use: boolean) => void;
setCoverageRenderer: (renderer: CoverageRenderer) => void;
} }
function applyTheme(theme: Theme) { function applyTheme(theme: Theme) {
@@ -47,6 +53,8 @@ export const useSettingsStore = create<SettingsState>()(
showBoundary: false, showBoundary: false,
showElevationOverlay: false, showElevationOverlay: false,
elevationOpacity: 0.5, elevationOpacity: 0.5,
useWebGLCoverage: true, // Default to WebGL smooth rendering
coverageRenderer: 'webgl-radial' as CoverageRenderer, // Default to radial gradients
setTheme: (theme: Theme) => { setTheme: (theme: Theme) => {
set({ theme }); set({ theme });
applyTheme(theme); applyTheme(theme);
@@ -59,9 +67,24 @@ export const useSettingsStore = create<SettingsState>()(
setShowBoundary: (show: boolean) => set({ showBoundary: show }), setShowBoundary: (show: boolean) => set({ showBoundary: show }),
setShowElevationOverlay: (show: boolean) => set({ showElevationOverlay: show }), setShowElevationOverlay: (show: boolean) => set({ showElevationOverlay: show }),
setElevationOpacity: (opacity: number) => set({ elevationOpacity: opacity }), setElevationOpacity: (opacity: number) => set({ elevationOpacity: opacity }),
setUseWebGLCoverage: (use: boolean) => set({ useWebGLCoverage: use }),
setCoverageRenderer: (renderer: CoverageRenderer) => set({ coverageRenderer: renderer }),
}), }),
{ {
name: 'rfcp-settings', name: 'rfcp-settings',
version: 3, // v3: Add coverageRenderer setting
migrate: (persistedState: unknown, version: number) => {
const state = persistedState as Partial<SettingsState>;
if (version < 2) {
// v2: Reset useWebGLCoverage to true (was stuck on false from early WebGL failures)
state.useWebGLCoverage = true;
}
if (version < 3) {
// v3: Add coverageRenderer, default to radial
state.coverageRenderer = 'webgl-radial';
}
return state as SettingsState;
},
} }
) )
); );

View File

@@ -64,6 +64,7 @@ interface SitesState {
batchAdjustTilt: (delta: number) => Promise<void>; batchAdjustTilt: (delta: number) => Promise<void>;
batchSetTilt: (tilt: number) => Promise<void>; batchSetTilt: (tilt: number) => Promise<void>;
batchSetFrequency: (frequency: number) => Promise<void>; batchSetFrequency: (frequency: number) => Promise<void>;
setAllSitesFrequency: (frequency: number) => Promise<void>;
} }
export const useSitesStore = create<SitesState>((set, get) => ({ export const useSitesStore = create<SitesState>((set, get) => ({
@@ -584,4 +585,30 @@ export const useSitesStore = create<SitesState>((set, get) => ({
set({ sites: updatedSites }); set({ sites: updatedSites });
useCoverageStore.getState().clearCoverage(); useCoverageStore.getState().clearCoverage();
}, },
setAllSitesFrequency: async (frequency: number) => {
const { sites } = get();
if (sites.length === 0) return;
pushSnapshot('set all sites frequency', sites);
const clamped = Math.max(100, Math.min(6000, frequency));
const now = new Date();
const updatedSites = sites.map((site) => ({
...site,
frequency: clamped,
updatedAt: now,
}));
for (const site of updatedSites) {
await db.sites.put({
id: site.id,
data: JSON.stringify(site),
createdAt: site.createdAt.getTime(),
updatedAt: now.getTime(),
});
}
set({ sites: updatedSites });
useCoverageStore.getState().clearCoverage();
},
})); }));

View File

@@ -0,0 +1,26 @@
/**
 * Tool Mode Store
 *
 * Single source of truth for which tool is currently active.
 * Only the active tool receives map click events.
 */
import { create } from 'zustand';

export type ActiveTool =
  | 'none' // Default — pan/zoom only, no click actions
  | 'ruler' // Distance measurement, click to add points
  | 'rx-placement' // Link Budget RX point, single click
  | 'site-placement'; // Place new site on map

interface ToolState {
  activeTool: ActiveTool;
  setActiveTool: (tool: ActiveTool) => void;
  clearTool: () => void;
}

// The idle tool: map only pans/zooms, no click handler fires.
const DEFAULT_TOOL: ActiveTool = 'none';

export const useToolStore = create<ToolState>((set) => ({
  activeTool: DEFAULT_TOOL,
  setActiveTool: (next) => set({ activeTool: next }),
  clearTool: () => set({ activeTool: DEFAULT_TOOL }),
}));

View File

@@ -15,6 +15,11 @@ export interface CoveragePoint {
atmospheric_loss?: number; // dB atmospheric absorption atmospheric_loss?: number; // dB atmospheric absorption
} }
export interface BoundaryPoint {
lat: number;
lon: number;
}
export interface CoverageResult { export interface CoverageResult {
points: CoveragePoint[]; points: CoveragePoint[];
calculationTime: number; // seconds (was ms for browser calc) calculationTime: number; // seconds (was ms for browser calc)
@@ -23,6 +28,7 @@ export interface CoverageResult {
// API-provided fields // API-provided fields
stats?: CoverageApiStats; stats?: CoverageApiStats;
modelsUsed?: string[]; modelsUsed?: string[];
boundary?: BoundaryPoint[]; // server-computed coverage boundary
} }
export interface CoverageApiStats { export interface CoverageApiStats {

View File

@@ -5,5 +5,6 @@ export type {
CoverageSettings, CoverageSettings,
CoverageApiStats, CoverageApiStats,
GridPoint, GridPoint,
BoundaryPoint,
} from './coverage.ts'; } from './coverage.ts';
export type { FrequencyBand } from './frequency.ts'; export type { FrequencyBand } from './frequency.ts';

41
install.bat Normal file
View File

@@ -0,0 +1,41 @@
@echo off
REM RFCP first-time setup wrapper: verifies Python is on PATH, then hands
REM off to install_rfcp.py which does the real dependency/GPU installation.
title RFCP - First Time Setup
echo ============================================
echo    RFCP - RF Coverage Planner - Setup
echo ============================================
echo.
REM Check if Python exists (probe version; discard output, keep exit code)
python --version >nul 2>&1
if errorlevel 1 (
    echo ERROR: Python not found!
    echo.
    echo Please install Python 3.10+ from:
    echo https://www.python.org/downloads/
    echo.
    echo Make sure to check "Add Python to PATH" during installation.
    echo.
    pause
    exit /b 1
)
echo Python found:
python --version
echo.
REM Change to script directory so relative paths in the installer resolve
cd /d "%~dp0"
REM Run installer (handles pip deps, GPU setup, frontend build)
echo Running RFCP installer...
echo.
python install_rfcp.py
echo.
echo ============================================
echo Setup complete!
echo.
echo To start RFCP, run: RFCP.bat
echo Then open: http://localhost:8090
echo ============================================
pause

498
install_rfcp.py Normal file
View File

@@ -0,0 +1,498 @@
#!/usr/bin/env python3
"""
RFCP Installer — Detects hardware, installs dependencies, sets up GPU acceleration.
Usage:
python install_rfcp.py
The installer handles:
- Python dependency installation
- GPU detection (NVIDIA/Intel/AMD)
- GPU acceleration setup (CuPy for CUDA, PyOpenCL for Intel/AMD)
- Frontend build (if Node.js available)
- Verification of installation
"""
import subprocess
import sys
import platform
import os
import shutil
def print_header(text: str) -> None:
    """Print *text* as a section header framed by 60-char '=' rules."""
    rule = '=' * 60
    print(f"\n{rule}\n {text}\n{rule}")
def print_step(text: str) -> None:
    """Announce the start of an installation step on its own line."""
    message = "\n>>> " + text
    print(message)
def check_python() -> bool:
    """Verify the running interpreter is Python 3.10 or newer.

    Returns:
        True when the interpreter meets the minimum version, else False.
        Prints an ``[OK]`` or ``[X]`` status line either way.
    """
    version = sys.version_info
    # Compare (major, minor) as a tuple. The previous form
    # `major < 3 or minor < 10` would wrongly reject e.g. Python 4.0
    # (4, 0) because 0 < 10.
    if (version.major, version.minor) < (3, 10):
        print(f"[X] Python 3.10+ required, found {version.major}.{version.minor}")
        return False
    print(f"[OK] Python {version.major}.{version.minor}.{version.micro}")
    return True
def check_node() -> bool:
    """Verify Node.js 18+ is available on PATH.

    Node.js is optional (only needed for the frontend build), so every
    failure mode prints a warning and returns False rather than raising.

    Returns:
        True when ``node --version`` succeeds and reports major >= 18.
    """
    try:
        result = subprocess.run(
            ["node", "--version"],
            capture_output=True,
            text=True,
            timeout=10
        )
        # A broken node binary can exit non-zero with empty stdout; bail
        # out before parsing so int('') doesn't raise a confusing error.
        if result.returncode != 0:
            print("[!] Node.js not found (frontend build will be skipped)")
            return False
        # Output looks like "v18.17.1" -> strip the leading "v".
        version = result.stdout.strip().lstrip('v')
        major = int(version.split('.')[0])
        if major < 18:
            print(f"[!] Node.js 18+ recommended, found {version}")
            return False
        print(f"[OK] Node.js {version}")
        return True
    except FileNotFoundError:
        print("[!] Node.js not found (frontend build will be skipped)")
        return False
    except Exception as e:  # timeout, malformed version string, etc.
        print(f"[!] Node.js check failed: {e}")
        return False
def detect_gpu() -> dict:
    """Detect available GPU hardware.

    Probes, in order:
      1. NVIDIA via ``nvidia-smi`` (works on any OS where the driver is
         installed; also reports VRAM in MiB).
      2. Intel/AMD via ``wmic`` on Windows or ``lspci`` elsewhere.

    Returns:
        dict with boolean flags ``nvidia``/``intel``/``amd``, the matching
        ``*_name`` strings, and ``nvidia_memory_mb`` (0 when unknown).
        Detection is best-effort: probe failures are swallowed and simply
        leave the corresponding flags False.
    """
    gpus = {
        "nvidia": False,
        "nvidia_name": "",
        "nvidia_memory_mb": 0,
        "intel": False,
        "intel_name": "",
        "amd": False,
        "amd_name": ""
    }
    # Check NVIDIA via nvidia-smi
    try:
        result = subprocess.run(
            ["nvidia-smi", "--query-gpu=name,driver_version,memory.total",
             "--format=csv,noheader"],
            capture_output=True,
            text=True,
            timeout=10
        )
        if result.returncode == 0 and result.stdout.strip():
            # Expected CSV row: "<name>, <driver_version>, <memory> MiB".
            # NOTE(review): only the first line is parsed, so on multi-GPU
            # boxes this reports the first device — presumably intentional.
            info = result.stdout.strip()
            parts = info.split(",")
            gpus["nvidia"] = True
            gpus["nvidia_name"] = parts[0].strip()
            if len(parts) >= 3:
                mem_str = parts[2].strip().replace(" MiB", "").replace(" MB", "")
                try:
                    gpus["nvidia_memory_mb"] = int(mem_str)
                except ValueError:
                    # Leave memory at 0 if the tool's format changes.
                    pass
            print(f"[OK] NVIDIA GPU: {gpus['nvidia_name']}")
    except FileNotFoundError:
        # nvidia-smi not installed: no NVIDIA driver, not an error.
        pass
    except subprocess.TimeoutExpired:
        print("[!] nvidia-smi timed out")
    except Exception as e:
        print(f"[!] NVIDIA detection error: {e}")
    # Check Intel/AMD via WMI (Windows) or lspci (Linux)
    if platform.system() == "Windows":
        try:
            result = subprocess.run(
                ["wmic", "path", "win32_videocontroller", "get",
                 "name", "/format:csv"],
                capture_output=True,
                text=True,
                timeout=10
            )
            for line in result.stdout.strip().split('\n'):
                line_lower = line.lower()
                # Only count Intel iGPU families we can accelerate.
                # NOTE(review): this misses e.g. Intel Arc — confirm whether
                # that is deliberate.
                if 'intel' in line_lower and ('uhd' in line_lower or 'iris' in line_lower or 'hd graphics' in line_lower):
                    gpus["intel"] = True
                    # Extract name from CSV
                    parts = line.split(',')
                    for part in parts:
                        if 'Intel' in part:
                            gpus["intel_name"] = part.strip()
                            break
                    if gpus["intel_name"]:
                        print(f"[OK] Intel GPU: {gpus['intel_name']}")
                elif 'amd' in line_lower or 'radeon' in line_lower:
                    gpus["amd"] = True
                    parts = line.split(',')
                    for part in parts:
                        if 'AMD' in part or 'Radeon' in part:
                            gpus["amd_name"] = part.strip()
                            break
                    if gpus["amd_name"]:
                        print(f"[OK] AMD GPU: {gpus['amd_name']}")
        except Exception:
            # wmic missing/failed: leave Intel/AMD flags False.
            pass
    else:
        # Linux: use lspci
        try:
            result = subprocess.run(
                ["lspci"],
                capture_output=True,
                text=True,
                timeout=10
            )
            for line in result.stdout.split('\n'):
                # lspci device classes of interest: VGA / Display / 3D.
                if 'VGA' in line or 'Display' in line or '3D' in line:
                    if 'Intel' in line:
                        gpus["intel"] = True
                        # Device description follows the last ':' in lspci output.
                        gpus["intel_name"] = line.split(':')[-1].strip() if ':' in line else "Intel GPU"
                        print(f"[OK] Intel GPU: {gpus['intel_name']}")
                    elif 'AMD' in line or 'Radeon' in line:
                        gpus["amd"] = True
                        gpus["amd_name"] = line.split(':')[-1].strip() if ':' in line else "AMD GPU"
                        print(f"[OK] AMD GPU: {gpus['amd_name']}")
        except Exception:
            # lspci unavailable (e.g. minimal container): skip silently.
            pass
    if not gpus["nvidia"] and not gpus["intel"] and not gpus["amd"]:
        print("[i] No GPU detected - will use CPU (NumPy)")
    return gpus
def install_core_dependencies() -> bool:
    """Install the backend's core Python dependencies via pip.

    Reads ``backend/requirements.txt`` relative to this installer and
    runs a quiet ``pip install -r`` with a 10-minute timeout.

    Returns:
        True on success; False when the requirements file is missing,
        pip exits non-zero, or the install times out.
    """
    print_step("Installing core dependencies...")
    requirements = os.path.join(os.path.dirname(__file__), "backend", "requirements.txt")
    if not os.path.exists(requirements):
        print(f"[X] requirements.txt not found at {requirements}")
        return False
    pip_cmd = [
        sys.executable, "-m", "pip", "install", "-r", requirements,
        "--quiet", "--no-warn-script-location",
    ]
    try:
        subprocess.run(pip_cmd, check=True, timeout=600)
    except subprocess.CalledProcessError as exc:
        print(f"[X] pip install failed: {exc}")
        return False
    except subprocess.TimeoutExpired:
        print("[X] pip install timed out (10 min)")
        return False
    print("[OK] Core dependencies installed")
    return True
def _pip_install(packages: list, timeout: int) -> None:
    """Quiet ``pip install`` of *packages* using this interpreter's pip.

    Raises subprocess.CalledProcessError or subprocess.TimeoutExpired;
    callers translate those into user-facing messages.
    """
    subprocess.run(
        [sys.executable, "-m", "pip", "install", *packages,
         "--quiet", "--no-warn-script-location"],
        check=True,
        timeout=timeout,
    )


def install_gpu_dependencies(gpus: dict) -> bool:
    """Install GPU-specific dependencies based on detected hardware.

    Args:
        gpus: Detection result from ``detect_gpu()``; keys ``nvidia``,
            ``intel``, ``amd`` (bool) plus matching ``*_name`` entries.

    Returns:
        True if at least one GPU backend (CuPy or PyOpenCL) was
        installed, False otherwise.  CPU-only mode is still fully
        functional — GPU support only speeds up large calculations.
    """
    print_step("Setting up GPU acceleration...")
    gpu_installed = False
    # NVIDIA - install CuPy (includes CUDA runtime)
    if gpus["nvidia"]:
        print(f" Installing CuPy for {gpus['nvidia_name']}...")
        try:
            # Try CUDA 12 first (newer cards, RTX 30xx/40xx)
            _pip_install(["cupy-cuda12x"], timeout=600)
            print(" [OK] CuPy (CUDA 12) installed")
            gpu_installed = True
        except (subprocess.CalledProcessError, subprocess.TimeoutExpired):
            try:
                # Fallback to CUDA 11 (older cards)
                print(" [!] CUDA 12 failed, trying CUDA 11...")
                _pip_install(["cupy-cuda11x"], timeout=600)
                print(" [OK] CuPy (CUDA 11) installed")
                gpu_installed = True
            except Exception as e:
                print(f" [X] CuPy installation failed: {e}")
                print(" Manual install: pip install cupy-cuda12x")
    # Intel/AMD - install PyOpenCL
    if gpus["intel"] or gpus["amd"]:
        gpu_name = gpus["intel_name"] or gpus["amd_name"]
        print(f" Installing PyOpenCL for {gpu_name}...")
        try:
            _pip_install(["pyopencl"], timeout=300)
            print(" [OK] PyOpenCL installed")
            gpu_installed = True
        except Exception as e:
            print(f" [X] PyOpenCL installation failed: {e}")
            print(" Manual install: pip install pyopencl")
    # Reassure the user when no GPU was detected at all: CPU mode is fine.
    if not gpu_installed and not gpus["nvidia"] and not gpus["intel"] and not gpus["amd"]:
        print(" [i] No GPU acceleration - using CPU (NumPy)")
        print(" This is fine! GPU just makes large calculations faster.")
    return gpu_installed
def install_frontend(has_node: bool) -> bool:
    """Install frontend npm dependencies and build the production bundle.

    Args:
        has_node: Result of the earlier Node.js availability check;
            when False the frontend build is skipped entirely.

    Returns:
        True when ``npm install`` and ``npm run build`` both succeed,
        False when skipped, the frontend dir is missing, npm is not
        found, a step fails, or a step times out (5 min each).
    """
    if not has_node:
        print_step("Skipping frontend build (Node.js not available)")
        return False
    print_step("Setting up frontend...")
    frontend_dir = os.path.join(os.path.dirname(__file__), "frontend")
    if not os.path.exists(os.path.join(frontend_dir, "package.json")):
        print("[!] Frontend directory not found")
        return False
    import shutil
    # Resolve the real executable: on Windows npm is "npm.cmd", which a
    # plain ["npm", ...] argv cannot find without shell=True.
    npm = shutil.which("npm") or "npm"
    try:
        print(" Installing npm packages...")
        subprocess.run(
            [npm, "install"],
            cwd=frontend_dir,
            check=True,
            timeout=300,
            capture_output=True
        )
        print(" Building frontend...")
        subprocess.run(
            [npm, "run", "build"],
            cwd=frontend_dir,
            check=True,
            timeout=300,
            capture_output=True
        )
        print("[OK] Frontend built")
        return True
    except FileNotFoundError:
        print("[X] npm executable not found on PATH")
        return False
    except subprocess.CalledProcessError as e:
        print(f"[X] Frontend build failed: {e}")
        # Surface npm's own error output — it was captured above and
        # previously discarded, leaving the user with no diagnostics.
        err = e.stderr or e.stdout or b""
        if isinstance(err, bytes):
            err = err.decode(errors="replace")
        if err.strip():
            print(" npm output (last lines):")
            for line in err.strip().splitlines()[-10:]:
                print(f"   {line}")
        return False
    except subprocess.TimeoutExpired:
        print("[X] Frontend build timed out")
        return False
def create_launcher() -> bool:
    """Create platform-appropriate launcher scripts next to this installer.

    Windows: writes ``RFCP.bat`` (starts the server) and ``install.bat``
    (first-time setup).  Linux/macOS: writes an executable ``rfcp.sh``.

    Returns:
        Always True.
    """
    print_step("Creating launcher scripts...")
    base_dir = os.path.dirname(os.path.abspath(__file__))
    if platform.system() == "Windows":
        # Create RFCP.bat
        launcher_path = os.path.join(base_dir, "RFCP.bat")
        with open(launcher_path, 'w') as f:
            f.write('@echo off\n')
            f.write('title RFCP - RF Coverage Planner\n')
            f.write(f'cd /d "{base_dir}"\n')
            f.write('echo Starting RFCP...\n')
            f.write('echo Open http://localhost:8090 in your browser\n')
            f.write('echo Press Ctrl+C to stop\n')
            f.write('echo.\n')
            f.write('cd backend\n')
            f.write(f'"{sys.executable}" -m uvicorn app.main:app --host 0.0.0.0 --port 8090\n')
        print(" [OK] Created: RFCP.bat")
        # Create install.bat for first-time setup
        install_bat_path = os.path.join(base_dir, "install.bat")
        with open(install_bat_path, 'w') as f:
            f.write('@echo off\n')
            f.write('title RFCP - First Time Setup\n')
            f.write('echo ============================================\n')
            f.write('echo RFCP - RF Coverage Planner - Setup\n')
            f.write('echo ============================================\n')
            f.write('echo.\n')
            f.write('python --version >nul 2>&1\n')
            f.write('if errorlevel 1 (\n')
            f.write(' echo ERROR: Python not found!\n')
            f.write(' echo Please install Python 3.10+ from python.org\n')
            f.write(' pause\n')
            f.write(' exit /b 1\n')
            f.write(')\n')
            f.write(f'cd /d "{base_dir}"\n')
            f.write('python install_rfcp.py\n')
            f.write('echo.\n')
            f.write('echo Setup complete! Run RFCP.bat to start.\n')
            f.write('pause\n')
        print(" [OK] Created: install.bat")
    else:
        # Linux/macOS
        launcher_path = os.path.join(base_dir, "rfcp.sh")
        with open(launcher_path, 'w') as f:
            f.write('#!/bin/bash\n')
            f.write(f'cd "{base_dir}"\n')
            f.write('echo "Starting RFCP..."\n')
            f.write('echo "Open http://localhost:8090 in your browser"\n')
            f.write('echo "Press Ctrl+C to stop"\n')
            f.write('cd backend\n')
            # Quote the interpreter path: unquoted it breaks when the
            # Python install path contains spaces (the .bat already quotes).
            f.write(f'"{sys.executable}" -m uvicorn app.main:app --host 0.0.0.0 --port 8090\n')
        # Make the shell launcher directly executable.
        os.chmod(launcher_path, 0o755)
        print(" [OK] Created: rfcp.sh")
    return True
def verify_installation() -> bool:
    """Run quick post-install import checks and print a report.

    Core packages (NumPy, SciPy, FastAPI, Uvicorn) are mandatory; CuPy
    and PyOpenCL are optional GPU backends and only produce info lines.

    Returns:
        True when every core package imports, False otherwise.
    """
    print_step("Verifying installation...")
    import importlib
    checks = []
    critical_fail = False

    # Core stack: any missing module is a critical failure.
    for module_name, label in (
        ("numpy", "NumPy"),
        ("scipy", "SciPy"),
        ("fastapi", "FastAPI"),
        ("uvicorn", "Uvicorn"),
    ):
        try:
            module = importlib.import_module(module_name)
            checks.append(f"[OK] {label} {module.__version__}")
        except ImportError:
            checks.append(f"[X] {label} missing")
            critical_fail = True

    # Optional: NVIDIA acceleration via CuPy.
    try:
        import cupy as cp
        if cp.cuda.runtime.getDeviceCount() > 0:
            props = cp.cuda.runtime.getDeviceProperties(0)
            name = props["name"]
            if isinstance(name, bytes):
                name = name.decode()
            mem_mb = props["totalGlobalMem"] // (1024 * 1024)
            checks.append(f"[OK] CuPy (CUDA) -> {name} ({mem_mb} MB)")
        else:
            checks.append("[i] CuPy installed but no CUDA devices found")
    except ImportError:
        checks.append("[i] CuPy not available (NVIDIA GPU acceleration disabled)")
    except Exception as e:
        checks.append(f"[!] CuPy error: {e}")

    # Optional: Intel/AMD acceleration via PyOpenCL.
    try:
        import pyopencl as cl
        device_names = [
            dev.name.strip()
            for plat in cl.get_platforms()
            for dev in plat.get_devices()
        ]
        if device_names:
            checks.append(f"[OK] PyOpenCL -> {', '.join(device_names[:2])}")
        else:
            checks.append("[i] PyOpenCL installed but no devices found")
    except ImportError:
        checks.append("[i] PyOpenCL not available (Intel/AMD GPU acceleration disabled)")
    except Exception as e:
        checks.append(f"[!] PyOpenCL error: {e}")

    for entry in checks:
        print(f" {entry}")
    return not critical_fail
def main():
    """Main installer entry point.

    Orchestrates the full install: prerequisite checks, GPU detection,
    core + GPU dependency installation, optional frontend build,
    launcher creation, verification, and a final summary.  Exits with
    status 1 when Python is too old or core dependencies fail.
    """
    print_header("RFCP - RF Coverage Planner - Installer")

    # 1) Prerequisites — Python is mandatory, Node.js merely optional.
    print_step("Checking prerequisites...")
    if not check_python():
        print("\n[X] Python 3.10+ is required. Please install from python.org")
        sys.exit(1)
    has_node = check_node()

    # 2) Hardware probe.
    print_step("Detecting GPU hardware...")
    gpus = detect_gpu()

    # 3) Mandatory backend dependencies — abort on failure.
    if not install_core_dependencies():
        print("\n[X] Core dependency installation failed")
        sys.exit(1)

    # 4) Optional GPU backends, 5) optional frontend, 6) launchers.
    install_gpu_dependencies(gpus)
    install_frontend(has_node)
    create_launcher()

    # 7) Smoke-test the imports we just installed.
    success = verify_installation()

    print_header("Installation Summary")
    if not success:
        print(" [!] Installation completed with errors")
        print(" Some features may not work correctly")
    else:
        print(" [OK] RFCP installed successfully!")
        print()
        print(" To start RFCP:")
        if platform.system() == "Windows":
            print(" Double-click RFCP.bat")
            print(" Or run: python -m uvicorn app.main:app --port 8090")
        else:
            print(" Run: ./rfcp.sh")
            print(" Or: python -m uvicorn app.main:app --port 8090")
        print()
        print(" Then open: http://localhost:8090")
        print()
        # GPU summary — first detected vendor wins.
        if gpus["nvidia"]:
            print(f" GPU: {gpus['nvidia_name']} (CUDA)")
        elif gpus["intel"]:
            print(f" GPU: {gpus['intel_name']} (OpenCL)")
        elif gpus["amd"]:
            print(f" GPU: {gpus['amd_name']} (OpenCL)")
        else:
            print(" Mode: CPU only (NumPy)")
    print()
    print('=' * 60)


if __name__ == "__main__":
    main()

70
installer/build-gpu.bat Normal file
View File

@@ -0,0 +1,70 @@
@echo off
REM build-gpu.bat — PyInstaller ONEDIR build of rfcp-server with CUDA support.
REM Steps [1/5]-[4/5] are pre-flight checks; [5/5] runs the actual build.
echo ========================================
echo RFCP GPU Build — ONEDIR mode
echo CuPy-cuda13x + CUDA Toolkit 13.x
echo ========================================
echo.

REM ── Check CuPy ──
echo [1/5] Checking CuPy installation...
python -c "import cupy; print(f' CuPy {cupy.__version__}')" 2>nul
if errorlevel 1 (
    echo ERROR: CuPy not installed.
    echo Run: pip install cupy-cuda13x
    exit /b 1
)

REM ── Check CUDA compute ── (import alone can succeed while kernels fail)
echo [2/5] Testing GPU compute...
python -c "import cupy; a = cupy.array([1,2,3]); assert a.sum() == 6; print(' GPU compute: OK')" 2>nul
if errorlevel 1 (
    echo ERROR: CuPy installed but GPU compute failed.
    echo Check: CUDA Toolkit installed? nvidia-smi works?
    exit /b 1
)

REM ── Check CUDA_PATH ── (the spec file collects DLLs from here)
echo [3/5] Checking CUDA Toolkit...
if defined CUDA_PATH (
    echo CUDA_PATH: %CUDA_PATH%
) else (
    echo WARNING: CUDA_PATH not set
)

REM ── Check nvidia pip DLLs ── (informational only; build uses the Toolkit)
echo [4/5] Checking nvidia pip packages...
python -c "import nvidia; import os; base=os.path.dirname(nvidia.__file__); dlls=[f for d in os.listdir(base) if os.path.isdir(os.path.join(base,d,'bin')) for f in os.listdir(os.path.join(base,d,'bin')) if f.endswith('.dll')]; print(f' nvidia pip DLLs: {len(dlls)}')" 2>nul
if errorlevel 1 (
    echo No nvidia pip packages (will use CUDA Toolkit)
)

REM ── Build ──
echo.
echo [5/5] Building rfcp-server (ONEDIR mode)...
echo This may take 3-5 minutes...
echo.
REM Run PyInstaller from backend/ so dist/ lands in backend/dist/.
cd /d "%~dp0\..\backend"
pyinstaller "..\installer\rfcp-server-gpu.spec" --clean --noconfirm

echo.
echo ========================================
if exist "dist\rfcp-server\rfcp-server.exe" (
    echo BUILD COMPLETE! (ONEDIR mode)
    echo.
    echo Output: backend\dist\rfcp-server\
    dir /b dist\rfcp-server\*.exe dist\rfcp-server\*.dll 2>nul | find /c /v "" > nul
    echo.
    echo Test commands:
    echo cd dist\rfcp-server
    echo rfcp-server.exe
    echo curl http://localhost:8090/api/health
    echo curl http://localhost:8090/api/gpu/status
    echo ========================================
) else (
    echo BUILD FAILED — check errors above
    echo ========================================
    exit /b 1
)
pause

84
installer/build-gpu.sh Normal file
View File

@@ -0,0 +1,84 @@
#!/bin/bash
# build-gpu.sh — PyInstaller ONEDIR build of rfcp-server with CUDA support.
# Mirrors build-gpu.bat for Linux/WSL2: pre-flight checks, then build.
set -e

echo "========================================"
echo " RFCP GPU Build — ONEDIR mode"
echo " CuPy-cuda13x + CUDA Toolkit 13.x"
echo "========================================"
echo ""

SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
BACKEND_DIR="$SCRIPT_DIR/../backend"

# Check backend exists
if [ ! -f "$BACKEND_DIR/run_server.py" ]; then
    echo "ERROR: Backend not found at $BACKEND_DIR"
    exit 1
fi

# Check Python
echo "[1/5] Checking Python..."
python3 --version || { echo "ERROR: Python3 not found"; exit 1; }

# Check CuPy
echo ""
echo "[2/5] Checking CuPy installation..."
if ! python3 -c "import cupy; print(f' CuPy {cupy.__version__}')" 2>/dev/null; then
    echo "ERROR: CuPy not installed"
    echo ""
    echo "Install CuPy:"
    echo " pip3 install cupy-cuda13x"
    echo " # or for WSL2:"
    echo " pip3 install cupy-cuda13x --break-system-packages"
    exit 1
fi

# Check GPU compute (non-fatal: import can work while kernels fail)
echo ""
echo "[3/5] Testing GPU compute..."
if python3 -c "import cupy; a = cupy.array([1,2,3]); assert a.sum() == 6; print(' GPU compute: OK')" 2>/dev/null; then
    :  # success — nothing to do
else
    echo "WARNING: GPU compute test failed (may still work)"
fi

# Check CUDA
echo ""
echo "[4/5] Checking CUDA..."
if [ -n "$CUDA_PATH" ]; then
    echo " CUDA_PATH: $CUDA_PATH"
else
    echo " CUDA_PATH not set (relying on nvidia pip packages)"
fi

# Check nvidia pip packages (informational only)
echo ""
echo "[5/5] Checking nvidia pip packages..."
python3 -c "import nvidia; print(' nvidia packages found')" 2>/dev/null || echo " No nvidia pip packages"

# Build — run from backend/ so dist/ lands in backend/dist/
echo ""
echo "Building rfcp-server (ONEDIR mode)..."
echo ""
cd "$BACKEND_DIR"
pyinstaller "$SCRIPT_DIR/rfcp-server-gpu.spec" --clean --noconfirm

echo ""
echo "========================================"
# Accept either the Linux or Windows (cross-build) executable name.
if [ -f "dist/rfcp-server/rfcp-server" ] || [ -f "dist/rfcp-server/rfcp-server.exe" ]; then
    echo " BUILD COMPLETE! (ONEDIR mode)"
    echo ""
    echo " Output: backend/dist/rfcp-server/"
    ls -lh dist/rfcp-server/ | head -20
    echo ""
    echo " Test:"
    echo " cd dist/rfcp-server"
    echo " ./rfcp-server"
    echo " curl http://localhost:8090/api/health"
    echo "========================================"
else
    echo " BUILD FAILED — check errors above"
    echo "========================================"
    exit 1
fi

View File

@@ -3,6 +3,7 @@ set -e
echo "=========================================" echo "========================================="
echo " RFCP Desktop Build (Windows)" echo " RFCP Desktop Build (Windows)"
echo " GPU-enabled ONEDIR build"
echo "=========================================" echo "========================================="
cd "$(dirname "$0")/.." cd "$(dirname "$0")/.."
@@ -14,15 +15,30 @@ npm ci
npm run build npm run build
cd .. cd ..
# 2. Build backend with PyInstaller # 2. Build backend with PyInstaller (GPU ONEDIR mode)
echo "[2/4] Building backend..." echo "[2/4] Building backend (GPU)..."
cd backend cd backend
# Check CuPy is available
if ! python -c "import cupy" 2>/dev/null; then
echo "WARNING: CuPy not installed - GPU acceleration will not be available"
echo " Install with: pip install cupy-cuda13x"
fi
python -m pip install -r requirements.txt python -m pip install -r requirements.txt
python -m pip install pyinstaller python -m pip install pyinstaller
cd ../installer
python -m PyInstaller rfcp-server.spec --clean --noconfirm # Build using GPU spec (ONEDIR output)
python -m PyInstaller ../installer/rfcp-server-gpu.spec --clean --noconfirm
# Copy ONEDIR folder to desktop staging area
# Result: desktop/backend-dist/win/rfcp-server/rfcp-server.exe + _internal/
mkdir -p ../desktop/backend-dist/win mkdir -p ../desktop/backend-dist/win
cp dist/rfcp-server.exe ../desktop/backend-dist/win/ rm -rf ../desktop/backend-dist/win/rfcp-server # Clean old build
cp -r dist/rfcp-server ../desktop/backend-dist/win/rfcp-server
echo " Backend copied to: desktop/backend-dist/win/rfcp-server/"
ls -la ../desktop/backend-dist/win/rfcp-server/*.exe 2>/dev/null || true
cd .. cd ..
# 3. Build Electron app # 3. Build Electron app

6
installer/package-lock.json generated Normal file
View File

@@ -0,0 +1,6 @@
{
"name": "installer",
"lockfileVersion": 3,
"requires": true,
"packages": {}
}

1
installer/package.json Normal file
View File

@@ -0,0 +1 @@
{}

View File

@@ -0,0 +1,305 @@
# rfcp-server-gpu.spec — GPU-enabled build (CuPy + CUDA 13.x)
# RFCP Iteration 3.6.0
#
# Mode: ONEDIR (directory output, not single exe)
# This is better for CUDA — DLLs load directly without temp extraction
#
# Requirements:
#   pip install cupy-cuda13x fastrlock pyinstaller
#   CUDA Toolkit 13.x installed (winget install Nvidia.CUDA)
#
# Build:
#   cd backend && pyinstaller ../installer/rfcp-server-gpu.spec --clean --noconfirm
#
# Output:
#   backend/dist/rfcp-server/rfcp-server.exe (+ DLLs in same folder)
import os
import sys
import glob
from PyInstaller.utils.hooks import collect_all, collect_dynamic_libs

# NOTE: SPEC is a global injected by PyInstaller — the path of this spec file.
backend_path = os.path.abspath(os.path.join(os.path.dirname(SPEC), '..', 'backend'))
print(f"[GPU SPEC] Backend path: {backend_path}")

# ═══════════════════════════════════════════
# Collect CuPy packages
# ═══════════════════════════════════════════
# Pre-initialize so the names exist even if collect_all() raises below.
cupy_datas = []
cupy_binaries = []
cupy_hiddenimports = []
cupyb_datas = []
cupyb_binaries = []
cupyb_hiddenimports = []
try:
    cupy_datas, cupy_binaries, cupy_hiddenimports = collect_all('cupy')
    cupyb_datas, cupyb_binaries, cupyb_hiddenimports = collect_all('cupy_backends')
    print(f"[GPU SPEC] CuPy: {len(cupy_binaries)} binaries, {len(cupy_datas)} data files")
except Exception as e:
    # Build proceeds CPU-only if CuPy is not importable in the build env.
    print(f"[GPU SPEC] WARNING: CuPy collection failed: {e}")

# NOTE: nvidia pip packages REMOVED - they have cuda12 DLLs that conflict with cupy-cuda13x
# We use CUDA Toolkit 13.x DLLs only

# ═══════════════════════════════════════════
# Collect CUDA Toolkit DLLs (system install)
# ═══════════════════════════════════════════
# Installed via: winget install Nvidia.CUDA
cuda_toolkit_binaries = []
cuda_path = os.environ.get('CUDA_PATH', '')
if cuda_path:
    # Scan BOTH bin\ and bin\x64\ directories
    cuda_bin_dirs = [
        os.path.join(cuda_path, 'bin'),
        os.path.join(cuda_path, 'bin', 'x64'),
    ]
    # Only essential CUDA runtime DLLs (exclude NPP, nvjpeg, nvblas, nvfatbin)
    cuda_dll_patterns = [
        'cublas64_*.dll',
        'cublasLt64_*.dll',
        'cudart64_*.dll',
        'cufft64_*.dll',
        'cufftw64_*.dll',
        'curand64_*.dll',
        'cusolver64_*.dll',
        'cusolverMg64_*.dll',
        'cusparse64_*.dll',
        'nvrtc64_*.dll',
        'nvrtc-builtins64_*.dll',
        'nvJitLink_*.dll',
        'nvjitlink_*.dll',
    ]
    collected_dlls = set()  # Avoid duplicates (same DLL in bin\ and bin\x64\)
    for cuda_bin in cuda_bin_dirs:
        if os.path.isdir(cuda_bin):
            for pattern in cuda_dll_patterns:
                for dll in glob.glob(os.path.join(cuda_bin, pattern)):
                    dll_name = os.path.basename(dll)
                    if dll_name not in collected_dlls:
                        # ('.',) destination: DLL goes next to the exe/_internal root.
                        cuda_toolkit_binaries.append((dll, '.'))
                        collected_dlls.add(dll_name)
            print(f"[GPU SPEC] Scanned: {cuda_bin}")
    print(f"[GPU SPEC] CUDA Toolkit ({cuda_path}): {len(cuda_toolkit_binaries)} DLLs")
    for dll, _ in cuda_toolkit_binaries:
        print(f"[GPU SPEC] {os.path.basename(dll)}")
else:
    print("[GPU SPEC] ERROR: CUDA_PATH not set!")
    print("[GPU SPEC] Install: winget install Nvidia.CUDA")

# All GPU binaries (CUDA Toolkit only, no nvidia pip packages)
all_gpu_binaries = cuda_toolkit_binaries
if len(all_gpu_binaries) == 0:
    # Non-fatal: the resulting build will fall back to CPU at runtime.
    print("[GPU SPEC] NO CUDA DLLs FOUND!")
    print("[GPU SPEC] Install CUDA Toolkit: winget install Nvidia.CUDA")
else:
    print(f"[GPU SPEC] Total GPU DLLs: {len(all_gpu_binaries)}")

# ═══════════════════════════════════════════
# Collect fastrlock (CuPy dependency)
# ═══════════════════════════════════════════
fl_datas = []
fl_binaries = []
fl_hiddenimports = []
try:
    fl_datas, fl_binaries, fl_hiddenimports = collect_all('fastrlock')
    print(f"[GPU SPEC] fastrlock: {len(fl_binaries)} binaries")
except Exception:
    print("[GPU SPEC] fastrlock not found (optional)")
# ═══════════════════════════════════════════
# PyInstaller Analysis
# ═══════════════════════════════════════════
a = Analysis(
    [os.path.join(backend_path, 'run_server.py')],
    pathex=[backend_path],
    # CuPy extension modules + CUDA Toolkit runtime DLLs collected above.
    binaries=(
        cupy_binaries + cupyb_binaries +
        fl_binaries + all_gpu_binaries
    ),
    datas=[
        # Include app/ source code
        (os.path.join(backend_path, 'app'), 'app'),
    ] + cupy_datas + cupyb_datas + fl_datas,
    # Modules imported dynamically at runtime that static analysis misses.
    hiddenimports=[
        # ── Uvicorn internals ──
        'uvicorn.logging',
        'uvicorn.loops',
        'uvicorn.loops.auto',
        'uvicorn.loops.asyncio',
        'uvicorn.protocols',
        'uvicorn.protocols.http',
        'uvicorn.protocols.http.auto',
        'uvicorn.protocols.http.h11_impl',
        'uvicorn.protocols.http.httptools_impl',
        'uvicorn.protocols.websockets',
        'uvicorn.protocols.websockets.auto',
        'uvicorn.protocols.websockets.wsproto_impl',
        'uvicorn.lifespan',
        'uvicorn.lifespan.on',
        'uvicorn.lifespan.off',
        # ── FastAPI / Starlette ──
        'fastapi',
        'fastapi.middleware',
        'fastapi.middleware.cors',
        'fastapi.routing',
        'fastapi.responses',
        'fastapi.exceptions',
        'starlette',
        'starlette.routing',
        'starlette.middleware',
        'starlette.middleware.cors',
        'starlette.responses',
        'starlette.requests',
        'starlette.concurrency',
        'starlette.formparsers',
        'starlette.staticfiles',
        # ── Pydantic ──
        'pydantic',
        'pydantic.fields',
        'pydantic_settings',
        'pydantic_core',
        # ── HTTP / networking ──
        'httpx',
        'httpcore',
        'h11',
        'httptools',
        'anyio',
        'anyio._backends',
        'anyio._backends._asyncio',
        'sniffio',
        # ── MongoDB (motor/pymongo) ──
        'motor',
        'motor.motor_asyncio',
        'pymongo',
        'pymongo.errors',
        'pymongo.collection',
        'pymongo.database',
        'pymongo.mongo_client',
        # ── Async I/O ──
        'aiofiles',
        'aiofiles.os',
        'aiofiles.ospath',
        # ── Scientific ──
        'numpy',
        'numpy.core',
        'scipy',
        'scipy.special',
        'scipy.interpolate',
        'shapely',
        'shapely.geometry',
        'shapely.ops',
        # ── Multipart ──
        'multipart',
        'python_multipart',
        # ── Encoding ──
        'email.mime',
        'email.mime.multipart',
        # ── Multiprocessing ──
        'multiprocessing',
        'multiprocessing.pool',
        'multiprocessing.queues',
        'concurrent.futures',
        # ── CuPy + CUDA ──
        'cupy',
        'cupy.cuda',
        'cupy.cuda.runtime',
        'cupy.cuda.driver',
        'cupy.cuda.memory',
        'cupy.cuda.stream',
        'cupy.cuda.device',
        'cupy._core',
        'cupy._core.core',
        'cupy._core._routines_math',
        'cupy._core._routines_logic',
        'cupy._core._routines_manipulation',
        'cupy._core._routines_sorting',
        'cupy._core._routines_statistics',
        'cupy._core._cub_reduction',
        'cupy.fft',
        'cupy.linalg',
        'cupy.random',
        'cupy_backends',
        'cupy_backends.cuda',
        'cupy_backends.cuda.api',
        'cupy_backends.cuda.libs',
        'fastrlock',
        'fastrlock.rlock',
    ] + cupy_hiddenimports + cupyb_hiddenimports + fl_hiddenimports,
    hookspath=[],
    hooksconfig={},
    # Hook runs before CuPy import to register DLL dirs (rthook_cuda_dlls.py).
    runtime_hooks=[os.path.join(os.path.dirname(SPEC), 'rthook_cuda_dlls.py')],
    # ── Exclude bloat ──
    excludes=[
        # GUI
        'tkinter',
        'matplotlib',
        'PIL',
        'IPython',
        # Data science bloat
        'pandas',
        'tensorflow',
        'torch',
        'keras',
        # Testing
        'pytest',
        # Jupyter
        'jupyter',
        'notebook',
        'ipykernel',
        # gRPC / telemetry (often pulled in by dependencies)
        'grpc',
        'grpcio',
        'google.protobuf',
        'opentelemetry',
        'opentelemetry.sdk',
        'opentelemetry.instrumentation',
        # Ray (too heavy, we use multiprocessing)
        'ray',
        # Other
        'cv2',
        'sklearn',
        'sympy',
    ],
    noarchive=False,
)
pyz = PYZ(a.pure)

# ═══════════════════════════════════════════
# ONEDIR mode: EXE + COLLECT
# ═══════════════════════════════════════════
# Creates: dist/rfcp-server/rfcp-server.exe + all DLLs in same folder
# Better for CUDA — no temp extraction needed
exe = EXE(
    pyz,
    a.scripts,
    [],  # No binaries/datas in EXE — they go in COLLECT
    exclude_binaries=True,  # ONEDIR mode
    name='rfcp-server',
    debug=False,
    bootloader_ignore_signals=False,
    strip=False,
    upx=False,  # Don't compress — CUDA libs need fast loading
    console=True,
    # Optional icon: only attached when installer/rfcp.ico exists.
    icon=os.path.join(os.path.dirname(SPEC), 'rfcp.ico') if os.path.exists(os.path.join(os.path.dirname(SPEC), 'rfcp.ico')) else None,
)

# COLLECT assembles the final ONEDIR folder (exe + binaries + data).
coll = COLLECT(
    exe,
    a.binaries,
    a.zipfiles,
    a.datas,
    strip=False,
    upx=False,
    upx_exclude=[],
    name='rfcp-server',
)

BIN
installer/rfcp.ico Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 116 KiB

View File

@@ -0,0 +1,24 @@
# PyInstaller runtime hook for CUDA DLL loading
# Must run BEFORE any CuPy import
#
# Problem: Windows Python 3.8+ requires os.add_dll_directory() for DLL search
# PyInstaller ONEDIR mode puts DLLs in _internal/ which isn't in the search path
import os
import sys

# Only act in a frozen (PyInstaller) build on Windows; a normal interpreter
# resolves CUDA DLLs through the regular site-packages layout.
if sys.platform == 'win32' and getattr(sys, 'frozen', False):
    # _MEIPASS points to _internal/ in ONEDIR mode
    base = getattr(sys, '_MEIPASS', None)
    if base and os.path.isdir(base):
        os.add_dll_directory(base)
        print(f"[CUDA DLL Hook] Added DLL directory: {base}")
    # Also add CUDA_PATH if available (fallback to system CUDA)
    cuda_path = os.environ.get('CUDA_PATH', '')
    if cuda_path:
        # CUDA Toolkit ships DLLs in both bin\ and bin\x64\ depending on version.
        for subdir in ['bin', os.path.join('bin', 'x64')]:
            d = os.path.join(cuda_path, subdir)
            if os.path.isdir(d):
                os.add_dll_directory(d)
                print(f"[CUDA DLL Hook] Added CUDA_PATH: {d}")

7
rfcp-backend.log.txt Normal file
View File

@@ -0,0 +1,7 @@
[CUDA DLL Hook] Added DLL directory: C:\Program Files\RFCP\resources\backend\rfcp-server\_internal
[RFCP] run_server.py starting...
[RFCP] Frozen mode, base dir: C:\Program Files\RFCP\resources\backend\rfcp-server
[err] Traceback (most recent call last):
File "run_server.py", line 33, in <module>
[err] PermissionError: [Errno 13] Permission denied: 'C:\\Program Files\\RFCP\\resources\\backend\\rfcp-server\\rfcp-server.log'
[PYI-17768:ERROR] Failed to execute script 'run_server' due to unhandled exception!

64
rfcp-gpu-preflight.bat Normal file
View File

@@ -0,0 +1,64 @@
@echo off
REM rfcp-gpu-preflight.bat — diagnostic-only checks before a GPU build.
REM Prints Python/CuPy/CUDA/PyInstaller status; never modifies anything.
echo ========================================
echo RFCP GPU Build — Pre-flight Check
echo ========================================
echo.
echo [1] Python version:
python --version
echo.
echo [2] CuPy status:
python -c "import cupy; print(f' CuPy {cupy.__version__}')"
python -c "import cupy; d=cupy.cuda.Device(0); print(f' Device: {d.id}'); print(f' Memory: {d.mem_info[1]//1024//1024} MB')"
echo.
echo [3] CUDA runtime version:
python -c "import cupy; v=cupy.cuda.runtime.runtimeGetVersion(); print(f' CUDA Runtime: {v}')"
echo.
echo [4] CUDA_PATH environment:
if defined CUDA_PATH (
    echo CUDA_PATH = %CUDA_PATH%
) else (
    echo WARNING: CUDA_PATH not set!
    echo.
    echo Checking common locations...
    if exist "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA" (
        REM Suggest a setx for each versioned toolkit directory found.
        for /d %%i in ("C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v*") do (
            echo Found: %%i
            echo.
            echo To fix, run:
            echo setx CUDA_PATH "%%i"
            echo Then restart terminal.
        )
    ) else (
        echo No CUDA Toolkit found in default location.
        echo CuPy bundles its own CUDA runtime, so this may be OK.
        echo But PyInstaller build might need it.
    )
)
echo.
echo [5] nvidia-smi:
nvidia-smi --query-gpu=name,driver_version,memory.total --format=csv,noheader 2>nul
if errorlevel 1 echo nvidia-smi not found in PATH
echo.
echo [6] CuPy CUDA libs location:
python -c "import cupy; import os; print(f' {os.path.dirname(cupy.__file__)}')"
python -c "import cupy._core.core" 2>nul && echo cupy._core.core: OK || echo cupy._core.core: FAILED
echo.
echo [7] fastrlock:
python -c "import fastrlock; print(f' fastrlock {fastrlock.__version__}')"
echo.
echo [8] PyInstaller:
python -c "import PyInstaller; print(f' PyInstaller {PyInstaller.__version__}')" 2>nul || echo PyInstaller NOT installed! Run: pip install pyinstaller
echo.
echo ========================================
echo Pre-flight complete
echo ========================================
pause

43
rfcp-main.log.txt Normal file
View File

@@ -0,0 +1,43 @@
[2026-02-04T18:01:12.043Z] Log file: C:\Users\ether\AppData\Roaming\rfcp-desktop\logs\rfcp-main.log
[2026-02-04T18:01:12.049Z] Platform: win32, Electron: 28.3.3
[2026-02-04T18:01:12.049Z] isDev: false
[2026-02-04T18:01:12.050Z] userData: C:\Users\ether\AppData\Roaming\rfcp-desktop
[2026-02-04T18:01:12.050Z] resourcesPath: C:\Program Files\RFCP\resources
[2026-02-04T18:01:12.145Z] Data path: C:\Users\ether\AppData\Roaming\rfcp-desktop\data
[2026-02-04T18:01:12.146Z] Starting production backend: C:\Program Files\RFCP\resources\backend\rfcp-server\rfcp-server.exe
[2026-02-04T18:01:12.146Z] Backend cwd: C:\Program Files\RFCP\resources\backend\rfcp-server
[2026-02-04T18:01:12.578Z] Backend PID: 17768
[2026-02-04T18:01:13.206Z] [Backend] [CUDA DLL Hook] Added DLL directory: C:\Program Files\RFCP\resources\backend\rfcp-server\_internal
[RFCP] run_server.py starting...
[RFCP] Frozen mode, base dir: C:\Program Files\RFCP\resources\backend\rfcp-server
[2026-02-04T18:01:13.214Z] [Backend:err] Traceback (most recent call last):
File "run_server.py", line 33, in <module>
[2026-02-04T18:01:13.215Z] [Backend:err] PermissionError: [Errno 13] Permission denied: 'C:\\Program Files\\RFCP\\resources\\backend\\rfcp-server\\rfcp-server.log'
[PYI-17768:ERROR] Failed to execute script 'run_server' due to unhandled exception!
[2026-02-04T18:01:13.300Z] Backend exited: code=1, signal=null
[2026-02-04T18:01:42.980Z] Backend failed to start within 30s
[2026-02-04T18:02:08.444Z] [CLOSE] before-quit fired, isQuitting=false
[2026-02-04T18:02:08.445Z] [SHUTDOWN] Starting graceful shutdown...
[2026-02-04T18:02:08.447Z] [SHUTDOWN] Backend did not respond — force killing
[2026-02-04T18:02:08.447Z] [KILL] killBackend() called, platform=win32, PID=17768
[2026-02-04T18:02:08.447Z] [KILL] Running: taskkill /F /T /PID 17768
[2026-02-04T18:02:09.459Z] [KILL] Primary kill failed: Command failed: taskkill /F /T /PID 17768, trying SIGKILL fallback
[2026-02-04T18:02:09.459Z] [KILL] Fallback SIGKILL sent via process handle
[2026-02-04T18:02:09.459Z] [KILL] Backend cleanup complete (PID was 17768)
[2026-02-04T18:02:09.459Z] [KILL] === Starting aggressive kill ===
[2026-02-04T18:02:09.459Z] [KILL] Strategy 1: taskkill /F /IM
[2026-02-04T18:02:09.636Z] [KILL] Strategy 1: No processes or already killed
[2026-02-04T18:02:09.636Z] [KILL] Strategy 3: PowerShell Stop-Process
[2026-02-04T18:02:10.206Z] [KILL] Strategy 3: PowerShell failed or no processes
[2026-02-04T18:02:10.206Z] [KILL] Strategy 4: PowerShell CimInstance Terminate
[2026-02-04T18:02:10.691Z] [KILL] Strategy 4: SUCCESS
[2026-02-04T18:02:10.691Z] [KILL] === Kill sequence complete ===
[2026-02-04T18:02:11.194Z] [SHUTDOWN] Shutdown complete
[2026-02-04T18:02:11.214Z] [KILL] === Starting aggressive kill ===
[2026-02-04T18:02:11.214Z] [KILL] Strategy 1: taskkill /F /IM
[2026-02-04T18:02:11.378Z] [KILL] Strategy 1: No processes or already killed
[2026-02-04T18:02:11.378Z] [KILL] Strategy 3: PowerShell Stop-Process
[2026-02-04T18:02:11.655Z] [KILL] Strategy 3: PowerShell failed or no processes
[2026-02-04T18:02:11.655Z] [KILL] Strategy 4: PowerShell CimInstance Terminate
[2026-02-04T18:02:12.087Z] [KILL] Strategy 4: SUCCESS
[2026-02-04T18:02:12.087Z] [KILL] === Kill sequence complete ===

24
rfcp.sh Normal file
View File

@@ -0,0 +1,24 @@
#!/bin/bash
# RFCP - RF Coverage Planner - Launcher
# Starts the FastAPI backend with uvicorn on port 8090 (Ctrl+C to stop).

# Resolve the directory this script lives in so it works from any cwd.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
cd "$SCRIPT_DIR"

# Check if backend exists
if [ ! -f "backend/app/main.py" ]; then
    echo "ERROR: RFCP backend not found."
    echo "Run: python install_rfcp.py"
    exit 1
fi

echo "============================================"
echo " RFCP - RF Coverage Planner"
echo "============================================"
echo ""
echo "Starting backend server..."
echo "Open http://localhost:8090 in your browser"
echo "Press Ctrl+C to stop"
echo ""

cd backend
python3 -m uvicorn app.main:app --host 0.0.0.0 --port 8090