@mytec: iter3.4.0 start

This commit is contained in:
2026-02-02 21:30:00 +02:00
parent 7f0b4d2269
commit 867ee3d0f4
29 changed files with 1386 additions and 324 deletions

View File

@@ -0,0 +1,439 @@
# RFCP Iteration 3.4.0 — Large Radius Support (20-50km)
## Goal
Enable 50km radius calculations without OOM by implementing memory-efficient processing patterns.
**Current limitation:** > 10-20km radius causes OOM (5+ GB RAM usage)
**Target:** 50km radius with < 4GB RAM peak
---
## Phase 1: Memory-Mapped Terrain
### 1.1 Terrain mmap Loading
Change terrain_service to use memory-mapped files instead of loading full arrays into RAM.
**File:** `backend/app/services/terrain_service.py`
```python
# Before (loads ~25 MB per tile into RAM):
terrain = np.fromfile(f, dtype='>i2').reshape((rows, cols))
# After (near-zero RAM, OS pages from disk):
terrain = np.memmap(f, dtype='>i2', mode='r', shape=(rows, cols))
```
**Expected impact:** -200-400 MB RAM total across loaded tiles (each tile is ~25 MB)
### 1.2 Terrain Disk Cache
- Save downloaded .hgt files to persistent disk cache
- Don't keep raw arrays in memory after initial processing
- Implement LRU eviction if cache exceeds 2GB
- Location: `~/.rfcp/terrain_cache/`
---
## Phase 2: Tile-Based Processing
### 2.1 Split Large Calculations
If radius > 10km, split calculation area into 5km sub-tiles.
**File:** `backend/app/services/coverage_service.py` (or new `tile_processor.py`)
```python
def calculate_coverage_tiled(site, radius_m, resolution_m, settings):
    """Tile-based calculation for large radius.

    Radii up to 10 km go through the existing single-pass path; larger
    areas are processed as 5 km sub-tiles so only one tile's terrain and
    buildings are resident in memory at a time. Progress is reported
    after each tile completes.
    """
    # Small radius — the single-pass implementation is sufficient.
    if radius_m <= 10000:
        return calculate_coverage_single(site, radius_m, resolution_m, settings)

    # Large radius — split into tiles and process them one at a time.
    TILE_SIZE = 5000  # 5km tiles
    tiles = generate_tile_grid(site.lat, site.lon, radius_m, TILE_SIZE)

    collected = []
    for i, tile in enumerate(tiles):
        log(f"Processing tile {i+1}/{len(tiles)}: {tile.bbox}")

        # Only this tile's slice of terrain/buildings is loaded.
        tile_terrain = load_terrain_for_bbox(tile.bbox)
        tile_buildings = load_buildings_for_bbox(tile.bbox)

        # Coverage for this tile's grid points.
        tile_points = generate_grid_for_tile(tile, resolution_m)
        collected.extend(
            calculate_points(tile_points, site, settings,
                             tile_terrain, tile_buildings)
        )

        # Release tile data before the next iteration to bound peak RAM.
        del tile_terrain
        del tile_buildings
        gc.collect()

        # Progress as a 0-100 percentage.
        progress = (i + 1) / len(tiles) * 100
        yield_progress(progress, f"Tile {i+1}/{len(tiles)}")

    return merge_and_dedupe_results(collected)
def generate_tile_grid(center_lat, center_lon, radius_m, tile_size_m):
    """Generate the grid of tiles covering the calculation area.

    The square of side 2*radius_m centered on (center_lat, center_lon) is
    partitioned into n x n tiles of tile_size_m each; tiles that do not
    intersect the coverage circle are discarded so corner tiles of the
    square are never processed.

    Returns a list of Tile(bbox=..., index=(i, j)).
    """
    # Fix: the original computed lat_delta/lon_delta for the full-area
    # bbox but never used them — dead code removed.
    tiles = []
    # Tiles per axis needed to span the full diameter.
    n_tiles = ceil(radius_m * 2 / tile_size_m)
    for i in range(n_tiles):
        for j in range(n_tiles):
            tile_bbox = calculate_tile_bbox(center_lat, center_lon,
                                            i, j, n_tiles, tile_size_m)
            # Only include tiles that intersect with the coverage circle.
            if tile_intersects_circle(tile_bbox, center_lat, center_lon, radius_m):
                tiles.append(Tile(bbox=tile_bbox, index=(i, j)))
    return tiles
```
### 2.2 Progressive Results via WebSocket
Send results per-tile as they complete, so user sees coverage growing.
**File:** `backend/app/api/websocket.py`
```python
async def calculate_coverage_ws(websocket, params):
    """Stream tiled coverage results to the client as each tile completes.

    Emits one "partial_results" message per tile, then a final "complete"
    message with totals.

    NOTE(review): this is a design sketch — ``total_points`` and ``elapsed``
    are not defined in this scope and must be supplied by the surrounding
    implementation.
    NOTE(review): if calculate_coverage_tiled_generator is a synchronous
    generator, each tile's computation blocks the event loop between sends —
    confirm it is an async generator or offloaded to an executor.
    """
    for tile_results in calculate_coverage_tiled_generator(params):
        # Send partial results
        await websocket.send_json({
            "type": "partial_results",
            "points": tile_results.points,
            "progress": tile_results.progress,
            "tile": tile_results.tile_index,
            "status": f"Tile {tile_results.tile_index} complete"
        })
    # Final message
    await websocket.send_json({
        "type": "complete",
        "total_points": total_points,
        "computation_time": elapsed
    })
```
---
## Phase 3: SQLite Cache for OSM Data
### 3.1 Create Local Database
Replace in-memory OSM cache with SQLite database with spatial indexing.
**File:** `backend/app/services/cache_db.py` (NEW)
```python
import sqlite3
import json
class OSMCacheDB:
    """SQLite-backed persistent cache for OSM buildings / vegetation.

    Replaces the in-memory OSM cache. Rows are indexed both by coordinate
    (for bounding-box queries) and by ``cell_key`` (for batch loading and
    cache invalidation via the ``cache_meta`` table).

    Fixes vs. the original sketch:
    - ``~`` in the default path is expanded (sqlite3 does not do this);
    - missing ``_row_to_building`` helper is implemented;
    - ``cache_meta`` uses a composite (cell_key, data_type) primary key so
      buildings and vegetation freshness records don't clobber each other;
    - ``mark_cell_cached`` added — previously nothing ever wrote
      ``cache_meta``, so ``is_cell_cached`` could never return True.
    """

    def __init__(self, db_path="~/.rfcp/osm_cache.db"):
        """Open (and initialize) the cache database at *db_path*.

        sqlite3.connect() does NOT expand '~' — without expanduser() a
        literal '~' directory would be created relative to the CWD. The
        parent directory is created if missing; ':memory:' and bare
        filenames (empty dirname) are unaffected.
        """
        import os
        path = os.path.expanduser(db_path)
        parent = os.path.dirname(path)
        if parent:
            os.makedirs(parent, exist_ok=True)
        self.conn = sqlite3.connect(path)
        self._init_tables()

    def _init_tables(self):
        """Create tables and indexes on first use (idempotent)."""
        self.conn.executescript("""
            CREATE TABLE IF NOT EXISTS buildings (
                id INTEGER PRIMARY KEY,
                osm_id TEXT UNIQUE,
                lat REAL NOT NULL,
                lon REAL NOT NULL,
                height REAL DEFAULT 10.0,
                geometry TEXT,  -- GeoJSON
                cell_key TEXT,  -- grid cell for batch loading
                created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
            );
            CREATE INDEX IF NOT EXISTS idx_buildings_lat ON buildings(lat);
            CREATE INDEX IF NOT EXISTS idx_buildings_lon ON buildings(lon);
            CREATE INDEX IF NOT EXISTS idx_buildings_cell ON buildings(cell_key);
            CREATE TABLE IF NOT EXISTS vegetation (
                id INTEGER PRIMARY KEY,
                osm_id TEXT UNIQUE,
                lat REAL NOT NULL,
                lon REAL NOT NULL,
                type TEXT,
                geometry TEXT,
                cell_key TEXT
            );
            CREATE INDEX IF NOT EXISTS idx_veg_lat ON vegetation(lat);
            CREATE INDEX IF NOT EXISTS idx_veg_lon ON vegetation(lon);
            -- Metadata for cache invalidation. Composite PK: the same cell
            -- is cached independently per data_type (buildings vs
            -- vegetation); a single-column PK would let one type clobber
            -- the other's freshness record.
            CREATE TABLE IF NOT EXISTS cache_meta (
                cell_key TEXT,
                data_type TEXT,
                fetched_at TIMESTAMP,
                item_count INTEGER,
                PRIMARY KEY (cell_key, data_type)
            );
        """)
        self.conn.commit()

    def query_buildings_bbox(self, min_lat, max_lat, min_lon, max_lon, limit=20000):
        """Return up to *limit* buildings within the bounding box as dicts."""
        cursor = self.conn.execute("""
            SELECT osm_id, lat, lon, height, geometry
            FROM buildings
            WHERE lat BETWEEN ? AND ?
              AND lon BETWEEN ? AND ?
            LIMIT ?
        """, (min_lat, max_lat, min_lon, max_lon, limit))
        return [self._row_to_building(row) for row in cursor]

    def _row_to_building(self, row):
        """Convert a buildings row back into the dict shape insert_buildings consumed."""
        osm_id, lat, lon, height, geometry = row
        return {
            'id': osm_id,
            'lat': lat,
            'lon': lon,
            'height': height,
            # json.dumps(None) was stored as "null"; loads round-trips to None.
            'geometry': json.loads(geometry) if geometry else None,
        }

    def insert_buildings(self, buildings, cell_key):
        """Bulk insert buildings from an OSM fetch (duplicate osm_ids ignored).

        Call mark_cell_cached() afterwards so is_cell_cached() reports the
        cell as fresh.
        """
        self.conn.executemany("""
            INSERT OR IGNORE INTO buildings
                (osm_id, lat, lon, height, geometry, cell_key)
            VALUES (?, ?, ?, ?, ?, ?)
        """, [
            (b['id'], b['lat'], b['lon'], b.get('height', 10),
             json.dumps(b.get('geometry')), cell_key)
            for b in buildings
        ])
        self.conn.commit()

    def mark_cell_cached(self, cell_key, data_type, item_count=0):
        """Record that *cell_key* now holds fresh *data_type* data."""
        self.conn.execute("""
            INSERT OR REPLACE INTO cache_meta
                (cell_key, data_type, fetched_at, item_count)
            VALUES (?, ?, datetime('now'), ?)
        """, (cell_key, data_type, item_count))
        self.conn.commit()

    def is_cell_cached(self, cell_key, data_type, max_age_hours=24):
        """True if *cell_key*'s *data_type* data was fetched within *max_age_hours*."""
        cursor = self.conn.execute("""
            SELECT fetched_at FROM cache_meta
            WHERE cell_key = ? AND data_type = ?
              AND fetched_at > datetime('now', ?)
        """, (cell_key, data_type, f'-{max_age_hours} hours'))
        return cursor.fetchone() is not None
```
### 3.2 Update OSM Client
Modify OSM client to use SQLite cache.
**File:** `backend/app/services/osm_client.py`
```python
class OSMClient:
    """OSM data access with a persistent SQLite cache in front of Overpass."""

    def __init__(self):
        self.cache_db = OSMCacheDB()

    def get_buildings(self, bbox, max_count=20000):
        """Return up to *max_count* buildings in *bbox*, preferring the cache.

        bbox is (min_lat, min_lon, max_lat, max_lon). On a cache miss the
        Overpass API is queried and the result persisted for next time.
        """
        lat_lo, lon_lo, lat_hi, lon_hi = bbox
        cell_key = self._bbox_to_cell_key(bbox)
        # Fresh cache entry — serve straight from SQLite.
        if self.cache_db.is_cell_cached(cell_key, 'buildings'):
            return self.cache_db.query_buildings_bbox(
                lat_lo, lat_hi, lon_lo, lon_hi, max_count
            )
        # Cache miss — fetch from the Overpass API and store for reuse.
        fetched = self._fetch_from_overpass(bbox, 'buildings')
        self.cache_db.insert_buildings(fetched, cell_key)
        return fetched[:max_count]
---
## Phase 4: Worker Memory Optimization
### 4.1 Per-Tile Building Loading
Workers receive only tile bbox and query buildings themselves (or receive pre-filtered list).
```python
def _pool_worker_tiled(args):
    """Worker that loads buildings for its tile only.

    args: (tile_bbox, terrain_shm_refs, config) tuple, unpacked below.
    Each worker queries its own buildings from the SQLite cache instead of
    receiving the full pickled list, shrinking per-worker memory.

    NOTE(review): query_buildings_bbox takes (min_lat, max_lat, min_lon,
    max_lon), while bboxes elsewhere in this plan are ordered
    (min_lat, min_lon, max_lat, max_lon) — confirm tile_bbox's ordering
    before splatting it, or lon/lat bounds will be swapped.
    NOTE(review): sketch is incomplete — no result is returned here.
    """
    tile_bbox, terrain_shm_refs, config = args
    # Load only buildings for this tile
    cache_db = OSMCacheDB()
    buildings = cache_db.query_buildings_bbox(*tile_bbox, limit=5000)
    # Much smaller memory footprint per worker
    # ...rest of calculation
### 4.2 Adaptive Worker Count
Reduce workers for large radius to prevent combined memory explosion.
```python
def get_worker_count_for_radius(radius_m, base_workers):
    """Scale down workers for large calculations.

    Larger radii mean more per-worker data, so the worker count is capped
    progressively (>30km: 2, >20km: 3, >10km: 4); small radii keep the
    full base_workers.
    """
    # Thresholds checked largest-first; first match wins.
    for threshold_m, cap in ((30000, 2), (20000, 3), (10000, 4)):
        if radius_m > threshold_m:
            return min(base_workers, cap)
    return base_workers
```
---
## Phase 5: Frontend Progressive Rendering
### 5.1 Accumulate Partial Results
**File:** `frontend/src/store/coverage.ts`
```typescript
// Store shape for coverage results, extended for streamed (tiled) calcs.
interface CoverageState {
  points: CoveragePoint[];         // finalized results of the last completed calculation
  isCalculating: boolean;
  progress: number;                // NOTE(review): confirm scale — backend sends 0-1, tiled plan yields 0-100
  // NEW:
  partialResults: CoveragePoint[]; // accumulates streamed per-tile points while calculating
  tilesCompleted: number;
  totalTiles: number;
}
// Handle partial results
case 'partial_results':
set(state => ({
partialResults: [...state.partialResults, ...message.points],
progress: message.progress,
tilesCompleted: state.tilesCompleted + 1
}));
break;
case 'complete':
set(state => ({
points: state.partialResults, // Finalize
partialResults: [],
isCalculating: false
}));
break;
```
### 5.2 Incremental Heatmap Render
**File:** `frontend/src/components/map/CoverageHeatmap.tsx`
```typescript
// Renders the heatmap, switching to streamed partial results mid-calculation.
function CoverageHeatmap() {
  const { points, partialResults, isCalculating } = useCoverageStore();
  // Show partial results while calculating
  const displayPoints = isCalculating ? partialResults : points;
  // Throttle re-renders during streaming.
  // NOTE(review): the 500 here is presumably milliseconds, not "every 500
  // points" as originally stated — confirm against useThrottle's signature.
  const throttledPoints = useThrottle(displayPoints, 500);
  return <HeatmapLayer points={throttledPoints} />;
}
```
---
## Implementation Order
### Priority 1 — Biggest Impact
1. **Tile-based processing** (Phase 2.1) — enables large radius
2. **SQLite cache** (Phase 3) — reduces memory, speeds up repeated calcs
### Priority 2 — Memory Reduction
3. **Terrain mmap** (Phase 1.1) — easy win, minimal code change
4. **Per-tile building loading** (Phase 4.1)
### Priority 3 — UX Improvement
5. **Progressive WebSocket** (Phase 2.2)
6. **Frontend streaming** (Phase 5)
### Priority 4 — Polish
7. **Terrain disk cache** (Phase 1.2)
8. **Adaptive worker count** (Phase 4.2)
---
## Success Criteria
| Radius | Max Time | Max RAM |
|--------|----------|---------|
| 20 km | < 3 min | < 3 GB |
| 30 km | < 5 min | < 3.5 GB |
| 50 km | < 10 min | < 4 GB |
- No OOM crashes at any radius up to 50km
- Progressive results visible within 30s of starting
- Cache reuse speeds up repeated calculations 5-10x
---
## Files to Modify
### Backend (Python)
| File | Changes |
|------|---------|
| `terrain_service.py` | mmap loading, disk cache |
| `coverage_service.py` | tile-based routing |
| `parallel_coverage_service.py` | adaptive workers |
| `osm_client.py` | SQLite integration |
| `websocket.py` | streaming results |
| **NEW** `tile_processor.py` | tile generation & processing |
| **NEW** `cache_db.py` | SQLite cache layer |
### Frontend (TypeScript)
| File | Changes |
|------|---------|
| `store/coverage.ts` | partial results handling |
| `CoverageHeatmap.tsx` | incremental rendering |
| `App.tsx` | progress for tiled calc |
---
## Testing
```bash
# Test 20km radius
curl -X POST http://localhost:8888/api/coverage/calculate \
  -H "Content-Type: application/json" \
  -d '{"radius": 20000, "resolution": 500, "preset": "standard"}'
# Monitor memory — [r]fcp-server keeps grep from matching its own process
# line in ps output (the original "grep rfcp-server" printed the grep
# process's RSS as an extra bogus row every refresh).
watch -n 1 'ps aux | grep [r]fcp-server | awk "{print \$6/1024\" MB\"}"'
# Test 50km radius
curl -X POST http://localhost:8888/api/coverage/calculate \
  -H "Content-Type: application/json" \
  -d '{"radius": 50000, "resolution": 1000, "preset": "standard"}'
```
---
## Notes
- Tile size 5km is a balance — smaller = more overhead, larger = more memory
- SQLite R-tree extension would be faster but requires compilation
- For Rust version, all of this will be native and faster
---
*"Think in tiles, stream results, cache everything"* 🗺️

View File

@@ -1,233 +0,0 @@
# RFCP Development Session Summary
## Date: February 1, 2026
## Status: Phase 3.0 Complete, Performance Optimization Ongoing
---
## 🎯 Project Overview
**RFCP (Radio Frequency Coverage Planning)** — desktop application for tactical LTE network planning, part of UMTC (Ukrainian Military Tactical Communications) project.
**Tech Stack:**
- Backend: Python/FastAPI + NumPy + ProcessPoolExecutor
- Frontend: React + TypeScript + Vite
- Desktop: Electron
- Build: PyInstaller (backend), electron-builder (desktop)
**Goal:** Calculate RF coverage maps with terrain, buildings, vegetation analysis.
---
## ✅ What Works (Phase 3.0 Achievements)
### Performance
| Preset | Before | After | Status |
|--------|--------|-------|--------|
| Standard (100-200m res) | 38s | **~5s** | ✅ EXCELLENT |
| Detailed (300m, 5km) | timeout | timeout | ❌ Still broken |
### Architecture (48 new files, 82 tests)
- ✅ Modular propagation models (8 models: FreeSpace, Okumura-Hata, COST-231, ITU-R P.1546, etc.)
- ✅ SharedMemoryManager for terrain data (zero-copy, 25 MB)
- ✅ Building filtering (351k → 27k bbox → 15k cap)
- ✅ WebSocket progress streaming (backend works)
- ✅ Clean model selection by frequency/environment
- ✅ Worker cleanup on shutdown
- ✅ Overpass API retry with failover (3 attempts, mirror endpoint)
### New Files Structure
```
backend/app/
├── propagation/ # 8 model files
├── geometry/ # 5 files (haversine, intersection, reflection, diffraction, los)
├── core/ # 4 files (engine, grid, calculator, result)
├── parallel/ # 3 files (manager, worker, pool)
├── services/ # cache.py, osm_client.py
├── utils/ # logging.py, progress.py, units.py
└── api/websocket.py
frontend/src/
├── hooks/useWebSocket.ts
├── services/websocket.ts
└── components/FrequencyBandPanel.tsx
```
---
## ❌ Current Blockers
### 1. Detailed Preset Timeout (CRITICAL)
**Symptom:** 300s timeout, only 194/868 points calculated
**Latest test results:**
```
[DOMINANT_PATH_VEC] Point #1: buildings=30, walls=214, dist=4887m
302.8ms/point × 868 points = 262 seconds
```
**Root Cause Analysis:**
- Early return fix (Claude Code) was for `buildings=[]` case
- But in reality, buildings ARE present (15,000 after cap)
- Each point finds 17-30 nearby buildings
- Each building has 100-295 wall segments
- **dominant_path_service** geometry calculations are expensive
**The real problem is NOT "buildings=0 is slow"**
**The real problem IS "dominant_path with buildings is inherently slow"**
**Potential solutions:**
1. Simplify building geometry (reduce wall count)
2. Use spatial indexing more aggressively
3. Skip dominant_path for distant points (>3km?)
4. Reduce building query radius
5. Use simpler path loss model when buildings present
6. GPU acceleration (CuPy) for geometry
### 2. Progress Bar Stuck at "Initializing 5%"
**Symptom:** UI shows "Initializing 5%" forever
**Fix attempted:** `await asyncio.sleep(0)` after progress_fn() — not working
**Likely cause:** Frontend WebSocket connection or state update issue
### 3. App Close Broken
**Symptom:** Clicking X kills backend but frontend stays open
**Partial fix:** Worker cleanup works, but Electron window doesn't close
### 4. Memory Not Released
**Symptom:** 1328 MB not freed after calculation
```
Before: 3904 MB free
After: 2576 MB free
```
---
## 📊 Performance Analysis
### Why Detailed is slow (the math):
```
Points: 868
Buildings nearby per point: ~25 average
Walls per building: ~150 average
Wall intersection checks: 868 × 25 × 150 = 3,255,000
At 0.1ms per check = 325 seconds
```
### Why Standard is fast:
- Lower resolution = fewer points (~200 vs 868)
- Likely skips some detailed calculations
- Buildings still processed but fewer points to check
---
## 🔧 Key Files to Review
### Backend (performance critical)
```
backend/app/services/
├── dominant_path_service.py # THE BOTTLENECK
├── coverage_service.py # Orchestration, progress
├── parallel_coverage_service.py # Worker management
└── buildings_service.py # OSM fetch, caching
```
### Frontend (UI bugs)
```
frontend/src/
├── App.tsx # Progress display
├── store/coverage.ts # WebSocket state
└── services/websocket.ts # WS connection
```
### Desktop (close bug)
```
desktop/main.js # Electron lifecycle
```
---
## 🎯 Recommended Next Steps
### Priority 1: Fix Detailed Performance
**Option A: Aggressive spatial filtering**
```python
# In dominant_path_service.py
# Only check buildings within line-of-sight corridor
# Not all buildings within radius
```
**Option B: LOD (Level of Detail)**
```python
# Distance > 2km: skip dominant path entirely
# Distance 1-2km: simplified model
# Distance < 1km: full calculation
```
**Option C: Building simplification**
```python
# Reduce wall count per building
# Merge adjacent buildings
# Use bounding boxes instead of polygons for far buildings
```
### Priority 2: Fix UI Bugs
- Debug WebSocket in browser DevTools
- Check Electron close handler
### Priority 3: Memory
- Explicit cleanup after calculation
- Check for leaked references
---
## 📝 Session Timeline
1. **Phase 2.4-2.5.1** — Vectorization attempt (didn't help)
2. **Decision** — Full Phase 3.0 architecture refactor
3. **Architecture Doc** — 1719 lines specification
4. **Claude Code Round 1** — 48 files, 82 tests (35 min)
5. **Integration Round** — WebSocket, progress, model selection (20 min)
6. **Bug Fix Round** — Memory, workers, app close (15 min)
7. **Claude Code Fix** — Dominant path early return, Overpass retry, progress (13 min)
8. **Current** — Still timeout, need different approach
---
## 💡 Key Insights
1. **Vectorization alone doesn't help** — problem is algorithmic, not just numpy
2. **SharedMemory works** — terrain in shared memory is efficient
3. **Building count matters** — 351k→15k filtering helps but not enough
4. **dominant_path is the bottleneck** — consistently 200-300ms/point
5. **Standard preset proves architecture works** — fast when less work needed
---
## 🔗 Related Documents
- `/mnt/project/RFCP-Phase-3.0-Architecture-Refactor.md` — Full architecture spec
- `/mnt/project/SESSION-2025-01-30-Iteration-10_1-Complete.md` — Previous session
- `/mnt/transcripts/2026-02-01-19-06-32-phase-3.0-refactor-implementation-results.txt` — Detailed transcript
---
## 🎮 Side Project
During this session, also designed **DF Diplomacy Expanded** mod:
- Design doc: `DF-Diplomacy-Expanded-Design-Doc.md` (1202 lines)
- MVP: War score, peace negotiation, tribute, reputation
- Motto: *"Losing is fun, but sometimes you want to lose diplomatically."*
---
*"Standard preset works beautifully. Detailed preset needs love. The architecture is solid — now we optimize."*

View File

@@ -8,7 +8,6 @@ progress updates during computation phases.
import time
import asyncio
import logging
import threading
from typing import Optional
from fastapi import WebSocket, WebSocketDisconnect
@@ -51,7 +50,7 @@ class ConnectionManager:
"data": result,
})
except Exception as e:
logger.debug(f"[WS] send_result failed: {e}")
logger.warning(f"[WS] send_result failed: {e}")
async def send_error(self, ws: WebSocket, calc_id: str, error: str):
try:
@@ -61,7 +60,7 @@ class ConnectionManager:
"message": error,
})
except Exception as e:
logger.debug(f"[WS] send_error failed: {e}")
logger.warning(f"[WS] send_error failed: {e}")
ws_manager = ConnectionManager()
@@ -74,14 +73,32 @@ async def _run_calculation(ws: WebSocket, calc_id: str, data: dict):
# Shared progress state — written by worker threads, polled by event loop.
# Python GIL makes dict value assignment atomic for simple types.
_progress = {"phase": "Initializing", "pct": 0.05, "seq": 0}
_progress = {"phase": "Initializing", "pct": 0.0, "seq": 0}
_done = False
# Get event loop for cross-thread scheduling of WS sends.
loop = asyncio.get_running_loop()
_last_direct_pct = 0.0
_last_direct_phase = ""
def sync_progress_fn(phase: str, pct: float, _eta: Optional[float] = None):
"""Thread-safe progress callback — just updates a shared dict."""
"""Thread-safe progress callback — updates dict AND schedules direct WS send."""
nonlocal _last_direct_pct, _last_direct_phase
_progress["phase"] = phase
_progress["pct"] = pct
_progress["seq"] += 1
# Schedule direct WS send via event loop (works from any thread).
# Throttle: only send on phase change or >=2% progress.
if phase != _last_direct_phase or pct - _last_direct_pct >= 0.02:
_last_direct_pct = pct
_last_direct_phase = phase
try:
loop.call_soon_threadsafe(
asyncio.ensure_future,
ws_manager.send_progress(ws, calc_id, phase, pct),
)
except RuntimeError:
pass # Event loop closed
try:
sites_data = data.get("sites", [])
@@ -116,21 +133,27 @@ async def _run_calculation(ws: WebSocket, calc_id: str, data: dict):
if primary_model.name not in models_used:
models_used.insert(0, primary_model.name)
await ws_manager.send_progress(ws, calc_id, "Initializing", 0.05)
await ws_manager.send_progress(ws, calc_id, "Initializing", 0.02)
# ── Progress poller: reads shared dict and sends WS updates ──
# ── Backup progress poller: catches anything call_soon_threadsafe missed ──
async def progress_poller():
last_sent_seq = 0
last_sent_pct = 0.0
last_sent_phase = "Initializing"
while not _done:
await asyncio.sleep(0.3)
await asyncio.sleep(0.5)
seq = _progress["seq"]
pct = _progress["pct"]
phase = _progress["phase"]
if seq != last_sent_seq and (pct - last_sent_pct >= 0.01 or phase != "Calculating coverage"):
# Send on any phase change OR >=3% progress (primary sends handle fine-grained)
if seq != last_sent_seq and (
phase != last_sent_phase
or pct - last_sent_pct >= 0.03
):
await ws_manager.send_progress(ws, calc_id, phase, pct)
last_sent_seq = seq
last_sent_pct = pct
last_sent_phase = phase
poller_task = asyncio.create_task(progress_poller())
@@ -149,6 +172,7 @@ async def _run_calculation(ws: WebSocket, calc_id: str, data: dict):
points = await asyncio.wait_for(
coverage_service.calculate_multi_site_coverage(
sites, settings, cancel_token,
progress_fn=sync_progress_fn,
),
timeout=300.0,
)
@@ -170,7 +194,6 @@ async def _run_calculation(ws: WebSocket, calc_id: str, data: dict):
# Stop poller and send final progress
_done = True
await poller_task
await ws_manager.send_progress(ws, calc_id, "Finalizing", 0.98)
computation_time = time.time() - start_time
@@ -201,7 +224,10 @@ async def _run_calculation(ws: WebSocket, calc_id: str, data: dict):
"models_used": models_used,
}
# Send "Complete" before result so frontend shows 100%
await ws_manager.send_progress(ws, calc_id, "Complete", 1.0)
await ws_manager.send_result(ws, calc_id, result)
logger.info(f"[WS] calc={calc_id} done: {len(points)} pts, {computation_time:.1f}s")
except Exception as e:
logger.error(f"[WS] Calculation error: {e}", exc_info=True)

View File

@@ -485,7 +485,16 @@ class CoverageService:
)
streets = _filter_osm_list_to_bbox(streets, min_lat, min_lon, max_lat, max_lon)
water_bodies = _filter_osm_list_to_bbox(water_bodies, min_lat, min_lon, max_lat, max_lon)
vegetation_areas = _filter_osm_list_to_bbox(vegetation_areas, min_lat, min_lon, max_lat, max_lon)
# Cap vegetation at 5000 — each area requires O(samples × areas)
# point-in-polygon checks per grid point. 20k+ areas with dominant
# path enabled causes OOM via worker memory explosion.
vegetation_areas = _filter_osm_list_to_bbox(
vegetation_areas, min_lat, min_lon, max_lat, max_lon,
max_count=5000,
)
_clog(f"Filtered OSM data: {len(buildings)} bldgs, {len(streets)} streets, "
f"{len(water_bodies)} water, {len(vegetation_areas)} veg")
# Build spatial index for buildings
spatial_idx: Optional[SpatialIndex] = None
@@ -650,10 +659,13 @@ class CoverageService:
sites: List[SiteParams],
settings: CoverageSettings,
cancel_token: Optional[CancellationToken] = None,
progress_fn: Optional[Callable[[str, float], None]] = None,
) -> List[CoveragePoint]:
"""
Calculate combined coverage from multiple sites
Best server (strongest signal) wins at each point
progress_fn(phase, pct): optional callback for progress updates (0.0-1.0).
"""
if not sites:
return []
@@ -661,10 +673,26 @@ class CoverageService:
# Apply preset once
settings = apply_preset(settings)
# Per-site progress tracking for averaged overall progress
num_sites = len(sites)
_site_progress = [0.0] * num_sites
def _make_site_progress(idx: int):
"""Create a progress_fn for one site that reports scaled overall progress."""
def _site_fn(phase: str, pct: float, _eta=None):
_site_progress[idx] = pct
if progress_fn:
overall = sum(_site_progress) / num_sites
progress_fn(f"Site {idx + 1}/{num_sites}: {phase}", overall)
return _site_fn
# Get all individual coverages
all_coverages = await asyncio.gather(*[
self.calculate_coverage(site, settings, cancel_token)
for site in sites
self.calculate_coverage(
site, settings, cancel_token,
progress_fn=_make_site_progress(i) if progress_fn else None,
)
for i, site in enumerate(sites)
])
# Combine by best signal
@@ -751,7 +779,8 @@ class CoverageService:
points = []
timing = {"los": 0.0, "buildings": 0.0, "antenna": 0.0,
"dominant_path": 0.0, "street_canyon": 0.0,
"reflection": 0.0, "vegetation": 0.0}
"reflection": 0.0, "vegetation": 0.0,
"lod_none": 0, "lod_simplified": 0, "lod_full": 0}
total = len(grid)
log_interval = max(1, total // 20)
@@ -901,7 +930,6 @@ class CoverageService:
# LOD_NONE: skip dominant path entirely for distant points (>3km)
if lod == LODLevel.NONE:
timing.setdefault("lod_none", 0)
timing["lod_none"] += 1
else:
t0 = time.time()
@@ -909,12 +937,10 @@ class CoverageService:
# LOD_SIMPLIFIED: limit buildings for mid-range points (1.5-3km)
dp_buildings = nearby_buildings
if lod == LODLevel.SIMPLIFIED:
timing.setdefault("lod_simplified", 0)
timing["lod_simplified"] += 1
if len(nearby_buildings) > SIMPLIFIED_MAX_BUILDINGS:
dp_buildings = nearby_buildings[:SIMPLIFIED_MAX_BUILDINGS]
else:
timing.setdefault("lod_full", 0)
timing["lod_full"] += 1
# nearby_buildings already filtered via spatial index —

View File

@@ -164,11 +164,16 @@ except ImportError:
ray = None # type: ignore
# ── Worker-level spatial index cache (persists across tasks in same worker) ──
# ── Worker-level caches (persist across tasks in same worker process) ──
_worker_spatial_idx = None
_worker_cache_key: Optional[str] = None
# Shared-memory buildings/OSM — unpickled once per worker, cached by key
_worker_shared_buildings = None
_worker_shared_osm_data = None
_worker_shared_data_key: Optional[str] = None
def _ray_process_chunk_impl(chunk, terrain_cache, buildings, osm_data, config):
"""Implementation: process a chunk of (lat, lon, elevation) tuples.
@@ -205,6 +210,7 @@ def _ray_process_chunk_impl(chunk, terrain_cache, buildings, osm_data, config):
"los": 0.0, "buildings": 0.0, "antenna": 0.0,
"dominant_path": 0.0, "street_canyon": 0.0,
"reflection": 0.0, "vegetation": 0.0,
"lod_none": 0, "lod_simplified": 0, "lod_full": 0,
}
precomputed = config.get('precomputed')
@@ -238,9 +244,14 @@ if RAY_AVAILABLE:
def get_cpu_count() -> int:
"""Get number of usable CPU cores, capped at 14."""
"""Get number of usable CPU cores, capped at 6.
Each worker holds its own copy of buildings + OSM data + spatial index
(~200-400 MB per worker). Capping at 6 prevents OOM on systems with
8-16 GB RAM (especially WSL2 with limited memory allocation).
"""
try:
return min(mp.cpu_count() or 4, 14)
return min(mp.cpu_count() or 4, 6)
except Exception:
return 4
@@ -327,8 +338,25 @@ def calculate_coverage_parallel(
except Exception as e:
log_fn(f"Ray execution failed: {e} — falling back to sequential")
# Fallback: ProcessPoolExecutor with reduced workers to avoid MemoryError
pool_workers = min(num_workers, 6)
# Fallback: ProcessPoolExecutor (shared memory eliminates per-chunk pickle)
pool_workers = num_workers
# Scale workers down based on data volume to prevent OOM.
# Each worker unpickles + holds its own copy of buildings, OSM data, and
# spatial index. With large datasets the per-worker memory can exceed
# 300 MB, so reduce workers to keep total under ~2 GB.
data_items = len(buildings) + len(streets) + len(water_bodies) + len(vegetation_areas)
if data_items > 20000:
pool_workers = min(pool_workers, 2)
log_fn(f"Data volume high ({data_items} items) — capping workers at {pool_workers}")
elif data_items > 10000:
pool_workers = min(pool_workers, 3)
log_fn(f"Data volume moderate ({data_items} items) — capping workers at {pool_workers}")
elif data_items > 5000:
pool_workers = min(pool_workers, 4)
log_fn(f"Data volume elevated ({data_items} items) — capping workers at {pool_workers}")
log_fn(f"ProcessPool: {pool_workers} workers (cpu_count={num_workers}, data_items={data_items})")
if pool_workers > 1 and total_points > 100:
try:
return _calculate_with_process_pool(
@@ -338,6 +366,8 @@ def calculate_coverage_parallel(
pool_workers, log_fn, cancel_token, precomputed,
progress_fn,
)
except (MemoryError, OSError) as e:
log_fn(f"ProcessPool OOM/OS error: {e} — falling back to sequential")
except Exception as e:
log_fn(f"ProcessPool failed: {e} — falling back to sequential")
@@ -396,8 +426,8 @@ def _calculate_with_ray(
for lat, lon in grid
]
# ~4 chunks per worker for granular progress
chunk_size = max(1, len(items) // (num_workers * 4))
# Larger chunks to amortize IPC overhead (was num_workers*4)
chunk_size = max(1, min(400, len(items) // max(2, num_workers)))
chunks = [items[i:i + chunk_size] for i in range(0, len(items), chunk_size)]
log_fn(f"Submitting {len(chunks)} chunks of ~{chunk_size} points")
@@ -489,6 +519,7 @@ def _pool_worker_process_chunk(args):
"los": 0.0, "buildings": 0.0, "antenna": 0.0,
"dominant_path": 0.0, "street_canyon": 0.0,
"reflection": 0.0, "vegetation": 0.0,
"lod_none": 0, "lod_simplified": 0, "lod_full": 0,
}
precomputed = config.get('precomputed')
@@ -542,6 +573,28 @@ def _store_terrain_in_shm(terrain_cache: Dict[str, np.ndarray], log_fn) -> Tuple
return blocks, refs
def _store_pickle_in_shm(data, label: str, log_fn) -> Tuple[Optional[Any], Optional[dict]]:
"""Pickle arbitrary data into a SharedMemory block.
Returns (shm_block, ref_dict) where ref_dict = {shm_name, size}.
On failure returns (None, None) and caller should fall back to pickle.
"""
import multiprocessing.shared_memory as shm_mod
import pickle
try:
blob = pickle.dumps(data, protocol=pickle.HIGHEST_PROTOCOL)
size = len(blob)
block = shm_mod.SharedMemory(create=True, size=size)
block.buf[:size] = blob
mb = size / (1024 * 1024)
log_fn(f"{label} in shared memory: {mb:.1f} MB")
return block, {'shm_name': block.name, 'size': size}
except Exception as e:
log_fn(f"Failed to store {label} in shm: {e}")
return None, None
def _pool_worker_shm_chunk(args):
"""Worker function that reads terrain from shared memory instead of pickle."""
import multiprocessing.shared_memory as shm_mod
@@ -585,6 +638,7 @@ def _pool_worker_shm_chunk(args):
"los": 0.0, "buildings": 0.0, "antenna": 0.0,
"dominant_path": 0.0, "street_canyon": 0.0,
"reflection": 0.0, "vegetation": 0.0,
"lod_none": 0, "lod_simplified": 0, "lod_full": 0,
}
precomputed = config.get('precomputed')
@@ -607,6 +661,200 @@ def _pool_worker_shm_chunk(args):
return results
_worker_chunk_count: int = 0 # per-worker chunk counter
def _pool_worker_shm_shared(args):
"""Worker: terrain + buildings + OSM all via shared memory.
Per-chunk args are tiny (~8 KB): just point coords, shm refs, and config.
Buildings and OSM data are unpickled from shared memory ONCE per worker
and cached in module globals for subsequent chunks.
"""
import multiprocessing.shared_memory as shm_mod
import pickle
global _worker_chunk_count
_worker_chunk_count += 1
pid = os.getpid()
t_worker_start = time.perf_counter()
chunk, terrain_shm_refs, shared_data_refs, config = args
# ── Reconstruct terrain from shared memory ──
t0 = time.perf_counter()
terrain_cache = {}
for tile_name, ref in terrain_shm_refs.items():
try:
block = shm_mod.SharedMemory(name=ref['shm_name'])
terrain_cache[tile_name] = np.ndarray(
ref['shape'], dtype=ref['dtype'], buffer=block.buf,
)
except Exception:
pass
from app.services.terrain_service import terrain_service
terrain_service._tile_cache = terrain_cache
t_terrain_shm = time.perf_counter() - t0
# ── Read buildings + OSM from shared memory (cached per worker) ──
global _worker_shared_buildings, _worker_shared_osm_data, _worker_shared_data_key
global _worker_spatial_idx, _worker_cache_key
data_key = config.get('cache_key', '')
cached = (_worker_shared_data_key == data_key)
t_unpickle_bld = 0.0
t_unpickle_osm = 0.0
t_spatial = 0.0
if not cached:
# First chunk for this calculation — unpickle from shm
buildings_ref = shared_data_refs.get('buildings')
osm_ref = shared_data_refs.get('osm_data')
if buildings_ref:
try:
t0 = time.perf_counter()
blk = shm_mod.SharedMemory(name=buildings_ref['shm_name'])
_worker_shared_buildings = pickle.loads(bytes(blk.buf[:buildings_ref['size']]))
t_unpickle_bld = time.perf_counter() - t0
except Exception:
_worker_shared_buildings = []
else:
_worker_shared_buildings = []
if osm_ref:
try:
t0 = time.perf_counter()
blk = shm_mod.SharedMemory(name=osm_ref['shm_name'])
_worker_shared_osm_data = pickle.loads(bytes(blk.buf[:osm_ref['size']]))
t_unpickle_osm = time.perf_counter() - t0
except Exception:
_worker_shared_osm_data = {}
else:
_worker_shared_osm_data = {}
_worker_shared_data_key = data_key
# Rebuild spatial index for new data
t0 = time.perf_counter()
if _worker_shared_buildings:
from app.services.spatial_index import SpatialIndex
_worker_spatial_idx = SpatialIndex()
_worker_spatial_idx.build(_worker_shared_buildings)
else:
_worker_spatial_idx = None
_worker_cache_key = data_key
t_spatial = time.perf_counter() - t0
print(
f"[WORKER {pid}] Init: terrain_shm={t_terrain_shm*1000:.1f}ms "
f"unpickle_bld={t_unpickle_bld*1000:.1f}ms "
f"unpickle_osm={t_unpickle_osm*1000:.1f}ms "
f"spatial={t_spatial*1000:.1f}ms "
f"buildings={len(_worker_shared_buildings or [])} "
f"tiles={len(terrain_cache)}",
flush=True,
)
print(
f"[WORKER {pid}] Processing chunk {_worker_chunk_count}, "
f"cached={cached}, points={len(chunk)}",
flush=True,
)
buildings = _worker_shared_buildings or []
osm_data = _worker_shared_osm_data or {}
# ── Imports + object creation (timed) ──
t0 = time.perf_counter()
from app.services.coverage_service import CoverageService, SiteParams, CoverageSettings
t_import = time.perf_counter() - t0
t0 = time.perf_counter()
site = SiteParams(**config['site_dict'])
settings = CoverageSettings(**config['settings_dict'])
svc = CoverageService()
t_pydantic = time.perf_counter() - t0
timing = {
"los": 0.0, "buildings": 0.0, "antenna": 0.0,
"dominant_path": 0.0, "street_canyon": 0.0,
"reflection": 0.0, "vegetation": 0.0,
"lod_none": 0, "lod_simplified": 0, "lod_full": 0,
}
precomputed = config.get('precomputed')
streets = osm_data.get('streets', [])
water = osm_data.get('water_bodies', [])
veg = osm_data.get('vegetation_areas', [])
site_elev = config['site_elevation']
t_init_done = time.perf_counter()
init_ms = (t_init_done - t_worker_start) * 1000
# ── Process points with per-point profiling (first 3 only) ──
results = []
t_loop_start = time.perf_counter()
t_model_dump_total = 0.0
n_dumped = 0
for i, (lat, lon, point_elev) in enumerate(chunk):
pre = precomputed.get((lat, lon)) if precomputed else None
# Snapshot timing dict before call (for first 3 points)
if i < 3:
timing_before = {k: v for k, v in timing.items()}
t_pt = time.perf_counter()
point = svc._calculate_point_sync(
site, lat, lon, settings,
buildings, streets,
_worker_spatial_idx, water, veg,
site_elev, point_elev, timing,
precomputed_distance=pre.get('distance') if pre else None,
precomputed_path_loss=pre.get('path_loss') if pre else None,
)
if i < 3:
t_pt_done = time.perf_counter()
pt_ms = (t_pt_done - t_pt) * 1000
deltas = {k: (timing[k] - timing_before.get(k, 0)) * 1000 for k in timing}
parts = " ".join(f"{k}={v:.2f}" for k, v in deltas.items() if v > 0.001)
print(
f"[WORKER {pid}] Point {i}: {pt_ms:.2f}ms "
f"rsrp={point.rsrp:.1f} dist={point.distance:.0f}m "
f"breakdown=[{parts}]",
flush=True,
)
if point.rsrp >= settings.min_signal:
t_md = time.perf_counter()
results.append(point.model_dump())
t_model_dump_total += time.perf_counter() - t_md
n_dumped += 1
t_loop_done = time.perf_counter()
loop_ms = (t_loop_done - t_loop_start) * 1000
total_ms = (t_loop_done - t_worker_start) * 1000
avg_pt = loop_ms / len(chunk) if chunk else 0
avg_dump = (t_model_dump_total * 1000 / n_dumped) if n_dumped else 0
print(
f"[WORKER {pid}] Chunk done: total={total_ms:.0f}ms "
f"init={init_ms:.0f}ms loop={loop_ms:.0f}ms "
f"avg_pt={avg_pt:.2f}ms model_dump={avg_dump:.2f}ms×{n_dumped} "
f"import={t_import*1000:.1f}ms pydantic={t_pydantic*1000:.1f}ms "
f"terrain_shm={t_terrain_shm*1000:.1f}ms "
f"results={len(results)}/{len(chunk)}",
flush=True,
)
return results
def _calculate_with_process_pool(
grid, point_elevations, site_dict, settings_dict,
terrain_cache, buildings, streets, water_bodies,
@@ -616,23 +864,28 @@ def _calculate_with_process_pool(
):
"""Execute using ProcessPoolExecutor.
Uses shared memory for terrain tiles (zero-copy numpy views) to reduce
memory usage compared to pickling full terrain arrays per worker.
Uses shared memory for terrain tiles (zero-copy numpy views), buildings,
and OSM data (pickle-once, read-many) to eliminate per-chunk serialization
overhead.
"""
from concurrent.futures import ProcessPoolExecutor, as_completed
total_points = len(grid)
# Estimate pickle size for building data and cap workers accordingly
building_count = len(buildings)
if building_count > 10000:
num_workers = min(num_workers, 3)
log_fn(f"Large building set ({building_count}) — reducing workers to {num_workers}")
elif building_count > 5000:
num_workers = min(num_workers, 4)
data_items = building_count + len(streets) + len(water_bodies) + len(vegetation_areas)
log_fn(f"ProcessPool mode: {total_points} points, {num_workers} workers, "
f"{building_count} buildings")
f"{building_count} buildings, {data_items} total OSM items")
# Log memory at start
try:
with open('/proc/self/status') as f:
for line in f:
if line.startswith('VmRSS:'):
log_fn(f"Memory before calculation: {line.strip()}")
break
except Exception:
pass
# Store terrain tiles in shared memory
shm_blocks = []
@@ -652,12 +905,31 @@ def _calculate_with_process_pool(
log_fn(f"Shared memory setup failed ({e}), using pickle fallback")
use_shm = False
# Store buildings + OSM data in shared memory (pickle once, read many)
shared_data_refs = {}
if use_shm:
bld_block, bld_ref = _store_pickle_in_shm(buildings, "Buildings", log_fn)
if bld_block:
shm_blocks.append(bld_block)
shared_data_refs['buildings'] = bld_ref
osm_data_dict = {
'streets': streets,
'water_bodies': water_bodies,
'vegetation_areas': vegetation_areas,
}
osm_block, osm_ref = _store_pickle_in_shm(osm_data_dict, "OSM data", log_fn)
if osm_block:
shm_blocks.append(osm_block)
shared_data_refs['osm_data'] = osm_ref
items = [
(lat, lon, point_elevations.get((lat, lon), 0.0))
for lat, lon in grid
]
chunk_size = max(1, len(items) // (num_workers * 2))
# Target larger chunks to amortize IPC overhead (was num_workers*2)
chunk_size = max(1, min(400, len(items) // max(2, num_workers)))
chunks = [items[i:i + chunk_size] for i in range(0, len(items), chunk_size)]
log_fn(f"Submitting {len(chunks)} chunks of ~{chunk_size} points")
@@ -685,8 +957,21 @@ def _calculate_with_process_pool(
pool = ProcessPoolExecutor(max_workers=num_workers, mp_context=ctx)
_set_active_pool(pool)
if use_shm:
# Shared memory path: pass shm refs instead of terrain data
if use_shm and shared_data_refs:
# Full shared memory path: terrain + buildings + OSM all via shm
worker_fn = _pool_worker_shm_shared
futures = {
pool.submit(
worker_fn,
(chunk, terrain_shm_refs, shared_data_refs, config),
): i
for i, chunk in enumerate(chunks)
}
elif use_shm and data_items <= 2000:
# Terrain-only shm — buildings/OSM pickled per chunk.
# Only safe for small datasets; large datasets would OOM from
# pickle copies (num_chunks × pickle_size).
log_fn(f"Terrain-only shm (small data: {data_items} items)")
worker_fn = _pool_worker_shm_chunk
futures = {
pool.submit(
@@ -695,8 +980,9 @@ def _calculate_with_process_pool(
): i
for i, chunk in enumerate(chunks)
}
else:
# Pickle fallback path
elif data_items <= 2000:
# Full pickle fallback — only safe for small datasets
log_fn(f"Full pickle path (small data: {data_items} items)")
futures = {
pool.submit(
_pool_worker_process_chunk,
@@ -704,6 +990,14 @@ def _calculate_with_process_pool(
): i
for i, chunk in enumerate(chunks)
}
else:
# Large dataset + shared memory failed → per-chunk pickle would OOM.
# Bail out; caller will fall back to sequential.
log_fn(f"Shared memory failed for large dataset ({data_items} items) "
f"— skipping ProcessPool to avoid OOM")
raise MemoryError(
f"Cannot safely pickle {data_items} OSM items per chunk"
)
completed_chunks = 0
for future in as_completed(futures):
@@ -730,6 +1024,9 @@ def _calculate_with_process_pool(
if progress_fn:
progress_fn("Calculating coverage", 0.40 + 0.55 * (completed_chunks / len(chunks)))
except MemoryError:
raise # Propagate to caller for sequential fallback
except Exception as e:
log_fn(f"ProcessPool error: {e}")
@@ -748,8 +1045,22 @@ def _calculate_with_process_pool(
block.unlink()
except Exception:
pass
# Release large local references before GC
chunks = None # noqa: F841
items = None # noqa: F841
osm_data = None # noqa: F841
shared_data_refs = None # noqa: F841
# Force garbage collection to release memory from workers
gc.collect()
# Log memory after cleanup
try:
with open('/proc/self/status') as f:
for line in f:
if line.startswith('VmRSS:'):
log_fn(f"Memory after cleanup: {line.strip()}")
break
except Exception:
pass
calc_time = time.time() - t_calc
log_fn(f"ProcessPool done: {calc_time:.1f}s, {len(all_results)} results "
@@ -758,7 +1069,11 @@ def _calculate_with_process_pool(
timing = {
"parallel_total": calc_time,
"workers": num_workers,
"backend": "process_pool" + ("/shm" if use_shm else "/pickle"),
"backend": "process_pool" + (
"/shm_full" if (use_shm and shared_data_refs)
else "/shm_terrain" if use_shm
else "/pickle"
),
}
return all_results, timing
@@ -791,6 +1106,7 @@ def _calculate_sequential(
"los": 0.0, "buildings": 0.0, "antenna": 0.0,
"dominant_path": 0.0, "street_canyon": 0.0,
"reflection": 0.0, "vegetation": 0.0,
"lod_none": 0, "lod_simplified": 0, "lod_full": 0,
}
t0 = time.time()

View File

@@ -19,6 +19,8 @@ import SiteList from '@/components/panels/SiteList.tsx';
import ExportPanel from '@/components/panels/ExportPanel.tsx';
import ProjectPanel from '@/components/panels/ProjectPanel.tsx';
import CoverageStats from '@/components/panels/CoverageStats.tsx';
import HistoryPanel from '@/components/panels/HistoryPanel.tsx';
import ResultsPanel from '@/components/panels/ResultsPanel.tsx';
import SiteImportExport from '@/components/panels/SiteImportExport.tsx';
import { SiteConfigModal } from '@/components/modals/index.ts';
import type { SiteFormValues } from '@/components/modals/index.ts';
@@ -394,8 +396,8 @@ export default function App() {
const currentSettings = useCoverageStore.getState().settings;
// Validation
if (currentSettings.radius > 100) {
addToast('Radius too large (max 100km)', 'error');
if (currentSettings.radius > 50) {
addToast('Radius too large (max 50km)', 'error');
return;
}
if (currentSettings.resolution < 50) {
@@ -406,9 +408,17 @@ export default function App() {
try {
await calculateCoverageApi();
// Check result after calculation
const result = useCoverageStore.getState().result;
const error = useCoverageStore.getState().error;
// After calculateCoverageApi returns, check if WS took over.
// In WS mode, the function returns immediately and result arrives asynchronously.
const state = useCoverageStore.getState();
if (state.isCalculating && state.activeCalcId) {
// WebSocket mode — toast will be shown from the WS onResult callback
return;
}
// HTTP mode — result is ready now
const result = state.result;
const error = state.error;
if (error) {
let userMessage = 'Calculation failed';
@@ -666,6 +676,7 @@ export default function App() {
)}
</MapView>
<HeatmapLegend />
<ResultsPanel />
</div>
{/* Side panel */}
@@ -706,14 +717,15 @@ export default function App() {
<NumberInput
label="Radius"
value={settings.radius}
onChange={(v) =>
useCoverageStore.getState().updateSettings({ radius: v })
}
onChange={(v) => {
const clamped = Math.min(v, 50);
useCoverageStore.getState().updateSettings({ radius: clamped });
}}
min={1}
max={100}
max={50}
step={5}
unit="km"
hint="Calculation area around each site"
hint="Calculation area around each site (max 50km)"
/>
<NumberInput
label="Resolution"
@@ -1174,6 +1186,9 @@ export default function App() {
modelsUsed={coverageResult?.modelsUsed}
/>
{/* Session history */}
<HistoryPanel />
{/* Export coverage data */}
<ExportPanel />

View File

@@ -27,7 +27,7 @@ export default function CoverageBoundary({
points,
visible,
resolution,
color = '#7c3aed', // purple-600 — visible against both map and orange gradient
color = '#ffffff', // white — visible against red-to-blue gradient
weight = 2,
}: CoverageBoundaryProps) {
const map = useMap();

View File

@@ -13,12 +13,11 @@ import { useSitesStore } from '@/store/sites.ts';
const LEGEND_STEPS = [
{ rsrp: -130, label: 'No Service' },
{ rsrp: -110, label: 'Very Weak' },
{ rsrp: -100, label: 'Weak' },
{ rsrp: -90, label: 'Fair' },
{ rsrp: -80, label: 'Good' },
{ rsrp: -70, label: 'Strong' },
{ rsrp: -50, label: 'Excellent' },
{ rsrp: -110, label: 'Weak' },
{ rsrp: -100, label: 'Fair' },
{ rsrp: -85, label: 'Good' },
{ rsrp: -70, label: 'Excellent' },
{ rsrp: -50, label: 'Max' },
];
/** Build a CSS linear-gradient string matching the heatmap gradient exactly. */
@@ -106,9 +105,9 @@ export default function HeatmapLegend() {
{/* Cutoff indicator + below-threshold (dimmed) */}
{belowThreshold.length > 0 && (
<div className="mt-1.5 pt-1.5 border-t border-dashed border-purple-400 dark:border-purple-500">
<div className="mt-1.5 pt-1.5 border-t border-dashed border-gray-400 dark:border-gray-500">
<div className="flex items-center gap-1 mb-1">
<span className="text-[9px] text-purple-500 dark:text-purple-400 font-medium">
<span className="text-[9px] text-gray-500 dark:text-gray-400 font-medium">
Coverage boundary ({threshold} dBm)
</span>
</div>

View File

@@ -1,6 +1,5 @@
import { useState, useEffect, useCallback } from 'react';
import NumberInput from '@/components/ui/NumberInput.tsx';
import FrequencySelector from '@/components/panels/FrequencySelector.tsx';
import FrequencyBandPanel from '@/components/panels/FrequencyBandPanel.tsx';
import ModalBackdrop from './ModalBackdrop.tsx';
@@ -31,6 +30,7 @@ interface SiteConfigModalProps {
const TEMPLATES = {
limesdr: {
label: 'LimeSDR',
tooltip: 'SDR dev board — low power, short range testing (20 dBm, 2 dBi, 1800 MHz)',
style: 'purple',
name: 'LimeSDR Mini',
power: 20,
@@ -41,6 +41,7 @@ const TEMPLATES = {
},
lowBBU: {
label: 'Low BBU',
tooltip: 'Low-power baseband unit — suburban/campus coverage (40 dBm, 8 dBi, 1800 MHz)',
style: 'green',
name: 'Low Power BBU',
power: 40,
@@ -51,6 +52,7 @@ const TEMPLATES = {
},
highBBU: {
label: 'High BBU',
tooltip: 'High-power BBU — urban macro sector (43 dBm, 15 dBi, 65\u00B0 sector)',
style: 'orange',
name: 'High Power BBU',
power: 43,
@@ -63,6 +65,7 @@ const TEMPLATES = {
},
urbanMacro: {
label: 'Urban Macro',
tooltip: 'Standard urban macro site — rooftop/tower sector (43 dBm, 18 dBi, 65\u00B0 sector)',
style: 'blue',
name: 'Urban Macro Site',
power: 43,
@@ -75,6 +78,7 @@ const TEMPLATES = {
},
ruralTower: {
label: 'Rural Tower',
tooltip: 'Rural high tower — long range 800 MHz omni coverage (46 dBm, 8 dBi, 50m)',
style: 'emerald',
name: 'Rural Tower',
power: 46,
@@ -85,6 +89,7 @@ const TEMPLATES = {
},
smallCell: {
label: 'Small Cell',
tooltip: 'Urban small cell — street-level high capacity (30 dBm, 12 dBi, 2600 MHz)',
style: 'cyan',
name: 'Small Cell',
power: 30,
@@ -97,6 +102,7 @@ const TEMPLATES = {
},
indoorDAS: {
label: 'Indoor DAS',
tooltip: 'Indoor distributed antenna — in-building coverage (23 dBm, 2 dBi, 2100 MHz)',
style: 'rose',
name: 'Indoor DAS',
power: 23,
@@ -107,6 +113,7 @@ const TEMPLATES = {
},
uhfTactical: {
label: 'UHF Tactical',
tooltip: 'UHF tactical radio — man-portable field comms (25 dBm, 3 dBi, 450 MHz)',
style: 'amber',
name: 'UHF Tactical Radio',
power: 25,
@@ -117,6 +124,7 @@ const TEMPLATES = {
},
vhfRepeater: {
label: 'VHF Repeater',
tooltip: 'VHF repeater — long range voice/data relay (40 dBm, 6 dBi, 150 MHz)',
style: 'teal',
name: 'VHF Repeater',
power: 40,
@@ -203,8 +211,8 @@ export default function SiteConfigModal({
if (form.power < 10 || form.power > 50) {
newErrors.power = 'Power must be 10-50 dBm';
}
if (form.gain < 0 || form.gain > 25) {
newErrors.gain = 'Gain must be 0-25 dBi';
if (form.gain < 0 || form.gain > 30) {
newErrors.gain = 'Gain must be 0-30 dBi';
}
if (form.frequency < 100 || form.frequency > 6000) {
newErrors.frequency = 'Frequency must be 100-6000 MHz';
@@ -360,20 +368,20 @@ export default function SiteConfigModal({
label="Antenna Gain"
value={form.gain}
min={0}
max={25}
max={30}
step={0.5}
unit="dBi"
hint="Omni 2-8, Sector 15-18, Parabolic 20-25"
hint={
form.gain <= 8
? `Omni-directional (${form.gain} dBi)`
: form.gain <= 18
? `Sector/Panel (${form.gain} dBi)`
: `Parabolic/Dish (${form.gain} dBi)`
}
onChange={(v) => updateField('gain', v)}
/>
{/* Frequency */}
<FrequencySelector
value={form.frequency}
onChange={(v) => updateField('frequency', v)}
/>
{/* Band panel — UHF/VHF/LTE/5G grouped selector */}
{/* Band panel — UHF/VHF/LTE/5G grouped selector + custom input */}
<FrequencyBandPanel
value={form.frequency}
onChange={(v) => updateField('frequency', v)}
@@ -485,6 +493,7 @@ export default function SiteConfigModal({
key={key}
type="button"
onClick={() => applyTemplate(key as keyof typeof TEMPLATES)}
title={t.tooltip}
className={`px-3 py-1.5 rounded text-xs font-medium transition-colors min-h-[32px]
${TEMPLATE_COLORS[t.style] ?? TEMPLATE_COLORS.blue}`}
>

View File

@@ -19,8 +19,8 @@ function estimateAreaKm2(pointCount: number, resolutionM: number): number {
}
const LEVELS = [
{ label: 'Excellent', threshold: -70, color: 'bg-green-500' },
{ label: 'Good', threshold: -85, color: 'bg-lime-500' },
{ label: 'Excellent', threshold: -70, color: 'bg-blue-500' },
{ label: 'Good', threshold: -85, color: 'bg-green-500' },
{ label: 'Fair', threshold: -100, color: 'bg-yellow-500' },
{ label: 'Weak', threshold: -Infinity, color: 'bg-red-500' },
] as const;

View File

@@ -5,6 +5,7 @@
* and propagation model info for each band.
*/
import { useState } from 'react';
import { COMMON_FREQUENCIES, FREQUENCY_GROUPS, getWavelength } from '@/constants/frequencies.ts';
import type { FrequencyBand } from '@/types/index.ts';
@@ -54,11 +55,25 @@ function getBandForFrequency(freq: number): string | null {
export default function FrequencyBandPanel({ value, onChange }: FrequencyBandPanelProps) {
const currentBand = getBandForFrequency(value);
const [customInput, setCustomInput] = useState('');
const handleCustomSubmit = () => {
const parsed = parseInt(customInput, 10);
if (parsed > 0 && parsed <= 100000) {
onChange(parsed);
setCustomInput('');
}
};
return (
<div className="space-y-3">
<div className="text-xs font-semibold text-gray-500 dark:text-dark-muted uppercase tracking-wide">
Frequency Bands
<div className="flex items-center justify-between">
<div className="text-xs font-semibold text-gray-500 dark:text-dark-muted uppercase tracking-wide">
Operating Frequency
</div>
<div className="text-xs font-medium text-gray-600 dark:text-dark-muted">
{value} MHz
</div>
</div>
{(Object.keys(FREQUENCY_GROUPS) as Array<keyof typeof FREQUENCY_GROUPS>).map((bandType) => {
@@ -139,6 +154,28 @@ export default function FrequencyBandPanel({ value, onChange }: FrequencyBandPan
</div>
);
})}
{/* Custom frequency input */}
<div className="flex gap-2">
<input
type="number"
placeholder="Custom MHz..."
value={customInput}
onChange={(e) => setCustomInput(e.target.value)}
onKeyDown={(e) => e.key === 'Enter' && handleCustomSubmit()}
className="flex-1 px-2.5 py-1.5 border border-gray-300 dark:border-dark-border dark:bg-dark-bg dark:text-dark-text rounded-md text-xs
focus:outline-none focus:ring-2 focus:ring-blue-500 focus:border-blue-500"
min={1}
max={100000}
/>
<button
type="button"
onClick={handleCustomSubmit}
className="px-3 py-1.5 bg-gray-200 hover:bg-gray-300 dark:bg-dark-border dark:hover:bg-dark-muted dark:text-dark-text rounded-md text-xs text-gray-700 min-h-[28px]"
>
Set
</button>
</div>
</div>
);
}

View File

@@ -0,0 +1,137 @@
import { useState } from 'react';
import { useCalcHistoryStore } from '@/store/calcHistory.ts';
import type { CalculationEntry } from '@/store/calcHistory.ts';
/**
 * Expanded detail section for one history entry: a four-column coverage
 * percentage grid plus an avg/min/max RSRP summary line.
 */
function EntryDetail({ entry }: { entry: CalculationEntry }) {
  // Render the four quality buckets from data instead of four hand-written copies.
  const buckets = [
    { label: 'Excellent', pct: entry.coverage.excellent, color: 'text-blue-600 dark:text-blue-400' },
    { label: 'Good', pct: entry.coverage.good, color: 'text-green-600 dark:text-green-400' },
    { label: 'Fair', pct: entry.coverage.fair, color: 'text-yellow-600 dark:text-yellow-400' },
    { label: 'Weak', pct: entry.coverage.weak, color: 'text-red-600 dark:text-red-400' },
  ];
  return (
    <div className="mt-1.5 pt-1.5 border-t border-gray-100 dark:border-dark-border space-y-1.5 text-[10px]">
      {/* Coverage breakdown with percentages */}
      <div className="grid grid-cols-4 gap-1 text-center">
        {buckets.map((b) => (
          <div key={b.label}>
            <div className={`font-semibold ${b.color}`}>
              {b.pct.toFixed(0)}%
            </div>
            <div className="text-gray-400">{b.label}</div>
          </div>
        ))}
      </div>
      {/* RSRP details */}
      <div className="flex justify-between text-gray-500 dark:text-dark-muted">
        <span>Avg RSRP: {entry.avgRsrp.toFixed(1)} dBm</span>
        <span>Range: {entry.rangeMin.toFixed(0)} / {entry.rangeMax.toFixed(0)} dBm</span>
      </div>
    </div>
  );
}
/**
 * Collapsible "Session History" card listing recent coverage calculations
 * from useCalcHistoryStore. Each entry row expands in place to reveal the
 * EntryDetail breakdown. Renders nothing while the history is empty.
 */
export default function HistoryPanel() {
  const entries = useCalcHistoryStore((s) => s.entries);
  const clearHistory = useCalcHistoryStore((s) => s.clearHistory);
  // `expanded` toggles the whole panel body; `expandedEntry` holds the id of
  // the single entry whose detail section is open (null = all collapsed).
  const [expanded, setExpanded] = useState(false);
  const [expandedEntry, setExpandedEntry] = useState<string | null>(null);
  // Hide the card entirely until the first calculation lands.
  if (entries.length === 0) return null;
  return (
    <div className="bg-white dark:bg-dark-surface border border-gray-200 dark:border-dark-border rounded-lg shadow-sm p-4">
      <div className="flex items-center justify-between">
        {/* Header toggles the panel; glyph is ▼ when open, ▶ when closed. */}
        <button
          onClick={() => setExpanded(!expanded)}
          className="flex items-center gap-1 text-sm font-semibold text-gray-800 dark:text-dark-text"
        >
          <span className="text-[10px]">{expanded ? '\u25BC' : '\u25B6'}</span>
          Session History
          <span className="text-xs text-gray-400 dark:text-dark-muted font-normal ml-1">
            ({entries.length})
          </span>
        </button>
        {/* Clear-all is only offered while the list is visible. */}
        {expanded && (
          <button
            onClick={clearHistory}
            className="text-[10px] text-red-400 hover:text-red-600 dark:text-red-500 dark:hover:text-red-400 transition-colors"
          >
            Clear All
          </button>
        )}
      </div>
      {expanded && (
        <div className="mt-2 space-y-1.5 max-h-80 overflow-y-auto">
          {entries.map((entry) => {
            const isOpen = expandedEntry === entry.id;
            return (
              // Whole row is a button; clicking toggles this entry's detail view.
              <button
                key={entry.id}
                onClick={() => setExpandedEntry(isOpen ? null : entry.id)}
                className="w-full text-left text-xs border border-gray-100 dark:border-dark-border rounded p-2 space-y-1 hover:bg-gray-50 dark:hover:bg-dark-bg transition-colors cursor-pointer"
              >
                {/* Row 1: timestamp + computation time */}
                <div className="flex justify-between items-center">
                  <span className="text-gray-500 dark:text-dark-muted">
                    {entry.timestamp.toLocaleTimeString()}
                  </span>
                  <span className="font-bold text-gray-800 dark:text-dark-text">
                    {entry.computationTime.toFixed(1)}s
                  </span>
                </div>
                {/* Row 2: badges */}
                <div className="flex gap-1.5 flex-wrap text-[10px]">
                  <span className="px-1 py-0.5 bg-blue-50 dark:bg-blue-900/20 text-blue-700 dark:text-blue-300 rounded">
                    {entry.preset}
                  </span>
                  <span className="text-gray-500 dark:text-dark-muted">
                    {entry.totalPoints.toLocaleString()} pts
                  </span>
                  <span className="text-gray-500 dark:text-dark-muted">
                    {entry.radius}km
                  </span>
                  <span className="text-gray-500 dark:text-dark-muted">
                    {entry.resolution}m
                  </span>
                </div>
                {/* Coverage bar — segment widths are the stored percentages,
                    so the four segments together fill the track. */}
                <div className="flex h-1.5 rounded-full overflow-hidden bg-gray-100 dark:bg-dark-border">
                  {entry.coverage.excellent > 0 && (
                    <div className="bg-blue-500" style={{ width: `${entry.coverage.excellent}%` }} />
                  )}
                  {entry.coverage.good > 0 && (
                    <div className="bg-green-500" style={{ width: `${entry.coverage.good}%` }} />
                  )}
                  {entry.coverage.fair > 0 && (
                    <div className="bg-yellow-500" style={{ width: `${entry.coverage.fair}%` }} />
                  )}
                  {entry.coverage.weak > 0 && (
                    <div className="bg-red-500" style={{ width: `${entry.coverage.weak}%` }} />
                  )}
                </div>
                {/* Expandable detail */}
                {isOpen && <EntryDetail entry={entry} />}
              </button>
            );
          })}
        </div>
      )}
    </div>
  );
}

View File

@@ -0,0 +1,163 @@
import { useEffect, useState, useRef, useCallback } from 'react';
import { useCoverageStore } from '@/store/coverage.ts';
import type { CoverageResult } from '@/types/index.ts';
/**
 * Bucket RSRP samples into quality tiers.
 * Thresholds (exclusive): > -70 excellent, > -85 good, > -100 fair, else weak.
 */
function classifyCoverage(points: Array<{ rsrp: number }>) {
  return points.reduce(
    (acc, { rsrp }) => {
      const tier =
        rsrp > -70 ? 'excellent' : rsrp > -85 ? 'good' : rsrp > -100 ? 'fair' : 'weak';
      acc[tier] += 1;
      return acc;
    },
    { excellent: 0, good: 0, fair: 0, weak: 0 },
  );
}
// How long the panel stays on screen before auto-dismissing (ms).
const AUTO_DISMISS_MS = 10_000;

/**
 * Floating "Calculation Complete" summary panel shown over the map when a
 * coverage calculation produces points. Slides in from the left, shows
 * time / point count / quality breakdown / settings badges, and auto-dismisses
 * after AUTO_DISMISS_MS (with a shrinking progress bar). Can also be closed
 * manually via the × button.
 */
export default function ResultsPanel() {
  const result = useCoverageStore((s) => s.result);
  // Two flags: `show` mounts the panel, `visible` drives the CSS slide/fade.
  // Keeping them separate lets the 300ms exit transition finish before unmount.
  const [visible, setVisible] = useState(false);
  const [show, setShow] = useState(false);
  const timerRef = useRef<ReturnType<typeof setTimeout> | undefined>(undefined);
  const prevResultRef = useRef<CoverageResult | null>(null);
  const dismiss = useCallback(() => {
    setVisible(false);
    // Unmount only after the 300ms CSS transition has played out.
    setTimeout(() => setShow(false), 300);
  }, []);
  useEffect(() => {
    // Only trigger on NEW result (not initial mount with existing result)
    if (result && result !== prevResultRef.current && result.points.length > 0) {
      setShow(true);
      // Next frame: mount hidden first, then flip `visible` so the transition runs.
      requestAnimationFrame(() => setVisible(true));
      // Restart the auto-dismiss countdown for each fresh result.
      if (timerRef.current) clearTimeout(timerRef.current);
      timerRef.current = setTimeout(dismiss, AUTO_DISMISS_MS);
    }
    prevResultRef.current = result;
    return () => {
      if (timerRef.current) clearTimeout(timerRef.current);
    };
  }, [result, dismiss]);
  if (!show || !result) return null;
  const counts = classifyCoverage(result.points);
  const total = result.points.length;
  const preset = result.settings.preset ?? 'standard';
  const timeStr = result.calculationTime.toFixed(1);
  return (
    <>
      {/* Keyframes for the auto-dismiss progress bar at the bottom. */}
      <style>{`@keyframes rfcp-shrink { from { width: 100%; } to { width: 0%; } }`}</style>
      <div
        className={`absolute top-4 left-4 z-[1000] w-72
          bg-white/95 dark:bg-dark-surface/95 backdrop-blur-sm
          border border-gray-200 dark:border-dark-border rounded-lg shadow-lg
          transition-all duration-300 ease-out pointer-events-auto
          ${visible ? 'opacity-100 translate-x-0' : 'opacity-0 -translate-x-8'}`}
      >
        {/* Header */}
        <div className="flex items-center justify-between px-3 pt-3 pb-1">
          <h3 className="text-xs font-semibold text-gray-700 dark:text-dark-text">
            Calculation Complete
          </h3>
          <button
            onClick={dismiss}
            className="text-gray-400 hover:text-gray-600 dark:hover:text-dark-text text-sm leading-none"
          >
            &times;
          </button>
        </div>
        {/* Body */}
        <div className="px-3 pb-3 space-y-2">
          {/* Time + points */}
          <div className="flex items-baseline gap-2">
            <span className="text-lg font-bold text-gray-800 dark:text-dark-text">
              {timeStr}s
            </span>
            <span className="text-xs text-gray-500 dark:text-dark-muted">
              {total.toLocaleString()} points
            </span>
          </div>
          {/* Coverage breakdown bar — widths are fractions of total points. */}
          <div className="flex h-2 rounded-full overflow-hidden">
            {counts.excellent > 0 && (
              <div className="bg-blue-500" style={{ width: `${(counts.excellent / total) * 100}%` }} />
            )}
            {counts.good > 0 && (
              <div className="bg-green-500" style={{ width: `${(counts.good / total) * 100}%` }} />
            )}
            {counts.fair > 0 && (
              <div className="bg-yellow-500" style={{ width: `${(counts.fair / total) * 100}%` }} />
            )}
            {counts.weak > 0 && (
              <div className="bg-red-500" style={{ width: `${(counts.weak / total) * 100}%` }} />
            )}
          </div>
          {/* Coverage percentages (guard total > 0 to avoid NaN display). */}
          <div className="grid grid-cols-4 gap-1 text-center text-[10px]">
            <div>
              <div className="font-semibold text-blue-600 dark:text-blue-400">
                {total > 0 ? ((counts.excellent / total) * 100).toFixed(0) : 0}%
              </div>
              <div className="text-gray-400">Exc</div>
            </div>
            <div>
              <div className="font-semibold text-green-600 dark:text-green-400">
                {total > 0 ? ((counts.good / total) * 100).toFixed(0) : 0}%
              </div>
              <div className="text-gray-400">Good</div>
            </div>
            <div>
              <div className="font-semibold text-yellow-600 dark:text-yellow-400">
                {total > 0 ? ((counts.fair / total) * 100).toFixed(0) : 0}%
              </div>
              <div className="text-gray-400">Fair</div>
            </div>
            <div>
              <div className="font-semibold text-red-600 dark:text-red-400">
                {total > 0 ? ((counts.weak / total) * 100).toFixed(0) : 0}%
              </div>
              <div className="text-gray-400">Weak</div>
            </div>
          </div>
          {/* Metadata */}
          <div className="flex flex-wrap gap-1.5 text-[10px] text-gray-500 dark:text-dark-muted">
            <span className="px-1.5 py-0.5 bg-gray-100 dark:bg-dark-border rounded">
              {preset}
            </span>
            <span className="px-1.5 py-0.5 bg-gray-100 dark:bg-dark-border rounded">
              {result.settings.radius}km
            </span>
            <span className="px-1.5 py-0.5 bg-gray-100 dark:bg-dark-border rounded">
              {result.settings.resolution}m
            </span>
            {result.modelsUsed && result.modelsUsed.length > 0 && (
              <span className="px-1.5 py-0.5 bg-gray-100 dark:bg-dark-border rounded">
                {result.modelsUsed.length} models
              </span>
            )}
          </div>
        </div>
        {/* Auto-dismiss progress bar */}
        <div className="h-0.5 bg-gray-100 dark:bg-dark-border rounded-b-lg overflow-hidden">
          <div
            className="h-full bg-blue-400 dark:bg-blue-500"
            style={{
              animation: `rfcp-shrink ${AUTO_DISMISS_MS}ms linear forwards`,
            }}
          />
        </div>
      </div>
    </>
  );
}

View File

@@ -33,6 +33,7 @@ interface PendingCalc {
class WebSocketService {
private ws: WebSocket | null = null;
private reconnectTimer: ReturnType<typeof setTimeout> | undefined;
private pingTimer: ReturnType<typeof setInterval> | undefined;
private _connected = false;
private _pendingCalcs = new Map<string, PendingCalc>();
private _connectionListeners = new Set<ConnectionCallback>();
@@ -70,10 +71,20 @@ class WebSocketService {
this.ws.onopen = () => {
this._setConnected(true);
// Keepalive pings every 30s to prevent connection timeout during long calculations
if (this.pingTimer) clearInterval(this.pingTimer);
this.pingTimer = setInterval(() => {
if (this.ws?.readyState === WebSocket.OPEN) {
this.ws.send(JSON.stringify({ type: 'ping' }));
}
}, 30_000);
};
this.ws.onclose = () => {
this._setConnected(false);
if (this.pingTimer) { clearInterval(this.pingTimer); this.pingTimer = undefined; }
// Fail all pending calculations — their callbacks reference the old socket
this._failPendingCalcs('WebSocket disconnected');
this.reconnectTimer = setTimeout(() => this.connect(), 2000);
};
@@ -121,8 +132,18 @@ class WebSocketService {
};
}
/** Fail all pending calculations (e.g. on disconnect). */
private _failPendingCalcs(reason: string): void {
for (const [calcId, pending] of this._pendingCalcs) {
try { pending.onError(reason); } catch { /* ignore */ }
this._pendingCalcs.delete(calcId);
}
}
disconnect(): void {
if (this.reconnectTimer) clearTimeout(this.reconnectTimer);
if (this.pingTimer) { clearInterval(this.pingTimer); this.pingTimer = undefined; }
this._failPendingCalcs('WebSocket disconnected');
this.ws?.close();
this.ws = null;
this._setConnected(false);

View File

@@ -0,0 +1,38 @@
import { create } from 'zustand';

/** Snapshot of one completed coverage calculation, kept for the session. */
export interface CalculationEntry {
  id: string;
  timestamp: Date;
  preset: string;
  radius: number;
  resolution: number;
  computationTime: number;
  totalPoints: number;
  coverage: { excellent: number; good: number; fair: number; weak: number };
  avgRsrp: number;
  rangeMin: number;
  rangeMax: number;
}

interface CalcHistoryState {
  entries: CalculationEntry[];
  addEntry: (entry: CalculationEntry) => void;
  clearHistory: () => void;
}

// Cap the in-memory history so a long session cannot grow unbounded.
const MAX_ENTRIES = 50;

/** Session-scoped calculation history store (newest entry first, capped). */
export const useCalcHistoryStore = create<CalcHistoryState>((set) => ({
  entries: [],
  // Prepend the new entry and trim to the cap in a single expression.
  addEntry: (entry) =>
    set((state) => ({
      entries: [entry, ...state.entries].slice(0, MAX_ENTRIES),
    })),
  clearHistory: () => set({ entries: [] }),
}));

View File

@@ -3,6 +3,9 @@ import { api } from '@/services/api.ts';
import { wsService } from '@/services/websocket.ts';
import type { WSProgress } from '@/services/websocket.ts';
import { useSitesStore } from '@/store/sites.ts';
import { useToastStore } from '@/components/ui/Toast.tsx';
import { useCalcHistoryStore } from '@/store/calcHistory.ts';
import type { CalculationEntry } from '@/store/calcHistory.ts';
import type { CoverageResult, CoverageSettings, CoverageApiStats } from '@/types/index.ts';
import type { ApiSiteParams, CoverageResponse } from '@/services/api.ts';
@@ -49,7 +52,7 @@ function buildApiSettings(settings: CoverageSettings) {
return {
radius: settings.radius * 1000, // km → meters
resolution: settings.resolution,
min_signal: settings.rsrpThreshold,
min_signal: -130, // Send all useful points; frontend filters visually via rsrpThreshold
preset: settings.preset,
use_terrain: settings.use_terrain,
use_buildings: settings.use_buildings,
@@ -92,6 +95,44 @@ function responseToResult(response: CoverageResponse, settings: CoverageSettings
};
}
/**
 * Summarize a finished coverage calculation into a session-history entry.
 *
 * Buckets every point by RSRP (> -70 excellent, > -85 good, > -100 fair,
 * else weak), tracks the observed min/max RSRP, and reports each bucket
 * as a percentage of all points. The average RSRP comes from the backend
 * stats when present, otherwise it is computed from the points themselves.
 * With zero points every percentage, the average, and both range bounds
 * fall back to 0.
 */
function buildHistoryEntry(result: CoverageResult): CalculationEntry {
  const points = result.points;
  const total = points.length;

  const counts = { excellent: 0, good: 0, fair: 0, weak: 0 };
  let minRsrp = Infinity;
  let maxRsrp = -Infinity;
  let rsrpSum = 0;

  for (const { rsrp } of points) {
    const bucket =
      rsrp > -70 ? 'excellent' : rsrp > -85 ? 'good' : rsrp > -100 ? 'fair' : 'weak';
    counts[bucket]++;
    if (rsrp < minRsrp) minRsrp = rsrp;
    if (rsrp > maxRsrp) maxRsrp = rsrp;
    rsrpSum += rsrp;
  }

  // Share of all points falling in a bucket, as a percentage.
  const pct = (n: number) => (total > 0 ? (n / total) * 100 : 0);

  return {
    id: crypto.randomUUID(),
    timestamp: new Date(),
    preset: result.settings.preset ?? 'standard',
    radius: result.settings.radius,
    resolution: result.settings.resolution,
    computationTime: result.calculationTime,
    totalPoints: result.totalPoints,
    coverage: {
      excellent: pct(counts.excellent),
      good: pct(counts.good),
      fair: pct(counts.fair),
      weak: pct(counts.weak),
    },
    avgRsrp: result.stats?.avg_rsrp ?? (total > 0 ? rsrpSum / total : 0),
    rangeMin: minRsrp === Infinity ? 0 : minRsrp,
    rangeMax: maxRsrp === -Infinity ? 0 : maxRsrp,
  };
}
export const useCoverageStore = create<CoverageState>((set, get) => ({
result: null,
isCalculating: false,
@@ -163,12 +204,36 @@ export const useCoverageStore = create<CoverageState>((set, get) => ({
apiSettings as unknown as Record<string, unknown>,
// onResult
(data) => {
const result = responseToResult(data, settings);
set({ result, isCalculating: false, error: null, progress: null, activeCalcId: null });
try {
const result = responseToResult(data, settings);
set({ result, isCalculating: false, error: null, progress: null, activeCalcId: null });
// Show success toast for WS result
const addToast = useToastStore.getState().addToast;
if (result.points.length === 0) {
addToast('No coverage points. Try increasing radius.', 'warning');
} else {
const timeStr = result.calculationTime.toFixed(1);
const modelsStr = result.modelsUsed?.length
? ` \u2022 ${result.modelsUsed.length} models`
: '';
addToast(
`Calculated ${result.totalPoints.toLocaleString()} points in ${timeStr}s${modelsStr}`,
'success'
);
}
// Push to session history
if (result.points.length > 0) {
useCalcHistoryStore.getState().addEntry(buildHistoryEntry(result));
}
} catch (err) {
console.error('[Coverage] Failed to process result:', err);
set({ isCalculating: false, error: 'Failed to process coverage result', progress: null, activeCalcId: null });
}
},
// onError
(error) => {
set({ isCalculating: false, error, progress: null, activeCalcId: null });
useToastStore.getState().addToast(`Calculation failed: ${error}`, 'error');
},
// onProgress
(progress) => {
@@ -191,6 +256,10 @@ export const useCoverageStore = create<CoverageState>((set, get) => ({
const result = responseToResult(response, settings);
set({ result, isCalculating: false, error: null });
// Push to session history
if (result.points.length > 0) {
useCalcHistoryStore.getState().addEntry(buildHistoryEntry(result));
}
} catch (err) {
if (err instanceof Error && err.name === 'AbortError') {
set({ isCalculating: false });

View File

@@ -1,10 +1,11 @@
/**
* RSRP → color mapping with smooth gradient interpolation.
*
* Purple → Orange palette:
* -130 dBm = deep purple (no service)
* -90 dBm = peach (fair)
* -50 dBm = bright orange (excellent)
* CloudRF-style Red → Blue palette:
* -130 dBm = dark red (no service)
* -100 dBm = yellow (fair)
* -70 dBm = green (good)
* -50 dBm = deep blue (excellent)
*
* All functions are pure and allocation-free on the hot path
* (pre-built lookup table for fast per-pixel color resolution).
@@ -18,14 +19,13 @@ interface GradientStop {
}
const GRADIENT_STOPS: GradientStop[] = [
{ value: 0.0, r: 26, g: 0, b: 51 }, // #1a0033 — deep purple (no service)
{ value: 0.15, r: 74, g: 20, b: 140 }, // #4a148c — dark purple
{ value: 0.30, r: 123, g: 31, b: 162 }, // #7b1fa2 — purple (very weak)
{ value: 0.45, r: 171, g: 71, b: 188 }, // #ab47bc — light purple (weak)
{ value: 0.60, r: 255, g: 138, b: 101 }, // #ff8a65 — peach (fair)
{ value: 0.75, r: 255, g: 111, b: 0 }, // #ff6f00 — dark orange (good)
{ value: 0.85, r: 255, g: 152, b: 0 }, // #ff9800 — orange (strong)
{ value: 1.0, r: 255, g: 183, b: 77 }, // #ffb74d — bright orange (excellent)
{ value: 0.0, r: 127, g: 0, b: 0 }, // #7f0000 — dark red (no service)
{ value: 0.15, r: 239, g: 68, b: 68 }, // #EF4444 — red (very weak)
{ value: 0.30, r: 249, g: 115, b: 22 }, // #F97316 — orange (weak)
{ value: 0.50, r: 234, g: 179, b: 8 }, // #EAB308 — yellow (fair)
{ value: 0.70, r: 34, g: 197, b: 94 }, // #22C55E — green (good)
{ value: 0.85, r: 59, g: 130, b: 246 }, // #3B82F6 — blue (strong)
{ value: 1.0, r: 37, g: 99, b: 235 }, // #2563EB — deep blue (excellent)
];
/**

File diff suppressed because one or more lines are too long