@mytec: iter3.2.1 start

This commit is contained in:
2026-02-01 23:51:21 +02:00
parent defa3ad440
commit b5b2fd90d2
4 changed files with 1057 additions and 23 deletions

View File

@@ -0,0 +1,332 @@
# RFCP - Iteration 3.1.0: LOD (Level of Detail) Optimization
## Overview
Detailed preset times out at 300s because dominant_path_service calculates expensive geometry for ALL 868 points. This iteration adds distance-based LOD to skip or simplify calculations for distant points, reducing total time to <60s.
**Current:** 302.8ms/point × 868 points = 262s (TIMEOUT)
**Target:** ~33s total (8x speedup)
---
## Issues Identified
**Problem 1: All points get full dominant_path calculation**
- Root Cause: No distance-based filtering
- Impact: Points >3km from TX still check 25+ buildings × 150+ walls
- At these distances, building-level detail provides minimal accuracy benefit
**Problem 2: dominant_path is O(points × buildings × walls)**
- Root Cause: Algorithmic complexity
- Impact: 868 × 25 × 150 ≈ 3.3M intersection checks
- Each check is ~0.1ms ≈ 326 seconds theoretical minimum
---
## Solution: Distance-Based LOD
### LOD Levels
```
Distance > 3km → LOD_NONE → Skip dominant_path entirely (0 buildings)
Distance 1.5-3km → LOD_SIMPLIFIED → Check only 5 nearest buildings
Distance < 1.5km → LOD_FULL → Full calculation (current behavior)
```
### Expected Performance
| LOD Level | Distance | Points (~) | Time/point | Total |
|-------------|-----------|------------|------------|---------|
| NONE | >3km | 600 (70%) | ~2ms | 1.2s |
| SIMPLIFIED | 1.5-3km | 180 (20%) | ~30ms | 5.4s |
| FULL | <1.5km | 88 (10%) | ~300ms | 26.4s |
| **TOTAL** | | 868 | | **~33s**|
---
## Implementation
### Step 1: Add LOD constants to dominant_path_service.py
**File:** `backend/app/services/dominant_path_service.py`
**Add at top of file (after imports):**
```python
from enum import Enum

class LODLevel(Enum):
    """Distance-based level of detail for dominant-path calculations."""
    NONE = "none"              # Skip dominant path entirely
    SIMPLIFIED = "simplified"  # Check only nearest buildings
    FULL = "full"              # Full calculation

# Distance thresholds (meters) that select the LOD level.
LOD_THRESHOLD_NONE = 3000        # beyond 3 km: skip dominant path
LOD_THRESHOLD_SIMPLIFIED = 1500  # 1.5-3 km: simplified mode

# Caps applied while in SIMPLIFIED mode.
SIMPLIFIED_MAX_BUILDINGS = 5
SIMPLIFIED_MAX_WALLS = 50
```
### Step 2: Add get_lod_level() function
**File:** `backend/app/services/dominant_path_service.py`
**Add function:**
```python
def get_lod_level(distance_m: float) -> LODLevel:
    """Map a TX-RX separation (meters) onto a LOD level.

    At long distances, building-level multipath contributes
    minimally to path loss — macro propagation models suffice.
    """
    if distance_m > LOD_THRESHOLD_NONE:
        return LODLevel.NONE
    if distance_m > LOD_THRESHOLD_SIMPLIFIED:
        return LODLevel.SIMPLIFIED
    return LODLevel.FULL
```
### Step 3: Create find_dominant_path_with_lod() wrapper
**File:** `backend/app/services/dominant_path_service.py`
**Add function (this wraps existing logic):**
```python
def _nearest_buildings_to_midpoint(buildings, mid_lat, mid_lon, terrain_cls):
    """Return the SIMPLIFIED_MAX_BUILDINGS buildings nearest to (mid_lat, mid_lon).

    Handles both geometry formats seen in OSM data:
    ``[[lon, lat], ...]`` and ``[{'lon': ..., 'lat': ...}, ...]``.
    Buildings whose geometry yields no coordinates are dropped.
    """
    ranked = []
    for b in buildings:
        geom = b.get('geometry', {})
        coords = geom.get('coordinates', [[]])[0] if isinstance(geom, dict) else b.get('geometry', [[]])
        if not coords:
            continue
        # Centroid approximation: arithmetic mean of the ring vertices.
        if isinstance(coords[0], (list, tuple)):
            blat = sum(c[1] for c in coords) / len(coords)
            blon = sum(c[0] for c in coords) / len(coords)
        else:
            blat = sum(c.get('lat', c.get('y', 0)) for c in coords) / len(coords)
            blon = sum(c.get('lon', c.get('x', 0)) for c in coords) / len(coords)
        dist = terrain_cls.haversine_distance(mid_lat, mid_lon, blat, blon)
        ranked.append((dist, b))
    # Key on distance only: dicts are not orderable, so ties must not
    # fall through to comparing the building objects themselves.
    ranked.sort(key=lambda pair: pair[0])
    return [b for _, b in ranked[:SIMPLIFIED_MAX_BUILDINGS]]

def find_dominant_path_with_lod(
    tx_lat: float, tx_lon: float, tx_height: float,
    rx_lat: float, rx_lon: float, rx_height: float,
    frequency_mhz: float,
    buildings: list,
    distance_m: float = None  # None -> recomputed via haversine below
) -> dict:
    """Find the dominant propagation path, applying distance-based LOD.

    Args:
        tx_lat, tx_lon, tx_height: Transmitter position.
        rx_lat, rx_lon, rx_height: Receiver position.
        frequency_mhz: Operating frequency.
        buildings: List of building dicts from OSM.
        distance_m: Pre-calculated TX-RX distance (optional, saves a recalc).

    Returns:
        dict with:
            - path_loss_db: Additional path loss from buildings (0 if skipped)
            - lod_level: Which LOD was applied ("none"/"simplified"/"full")
            - buildings_checked: How many buildings were evaluated
            - skipped: True if dominant_path was skipped entirely
    """
    from app.services.terrain_service import TerrainService

    if distance_m is None:
        distance_m = TerrainService.haversine_distance(tx_lat, tx_lon, rx_lat, rx_lon)

    lod = get_lod_level(distance_m)

    # LOD_NONE: beyond the far threshold the macro model alone suffices.
    if lod == LODLevel.NONE:
        return {
            "path_loss_db": 0.0,
            "lod_level": "none",
            "buildings_checked": 0,
            "walls_checked": 0,
            "skipped": True,
        }

    # LOD_SIMPLIFIED: keep only the buildings nearest the path midpoint.
    buildings_to_check = buildings
    if lod == LODLevel.SIMPLIFIED and buildings and len(buildings) > SIMPLIFIED_MAX_BUILDINGS:
        buildings_to_check = _nearest_buildings_to_midpoint(
            buildings, (tx_lat + rx_lat) / 2, (tx_lon + rx_lon) / 2, TerrainService
        )

    # Dispatch to whichever dominant-path implementation exists in this
    # codebase (vectorized preferred, loop-based service as fallback).
    try:
        result = find_dominant_path_vectorized(
            tx_lat, tx_lon,
            rx_lat, rx_lon,
            buildings_to_check,
            frequency_mhz,
        )
    except (NameError, AttributeError):
        try:
            result = dominant_path_service.find_dominant_paths(
                tx_lat, tx_lon, tx_height,
                rx_lat, rx_lon, rx_height,
                frequency_mhz,
                buildings_to_check,
            )
        except Exception:
            # Was a bare `except:` (also caught KeyboardInterrupt/SystemExit).
            # No usable implementation: report zero extra loss rather than
            # failing the whole coverage point.
            result = {"path_loss_db": 0.0}

    # Some implementations return a scalar loss instead of a dict.
    if not isinstance(result, dict):
        result = {"path_loss_db": float(result) if result else 0.0}

    # Attach LOD metadata for logging/diagnostics.
    result["lod_level"] = lod.value
    result["buildings_checked"] = len(buildings_to_check)
    result["skipped"] = False
    return result
```
### Step 4: Add logging for LOD decisions
**File:** `backend/app/services/dominant_path_service.py`
**Add after LOD decision (inside find_dominant_path_with_lod):**
```python
import logging
logger = logging.getLogger(__name__)
# Add this right after lod = get_lod_level(distance_m):
if lod == LODLevel.NONE:
logger.debug(f"[DOMINANT_PATH] LOD=none, dist={distance_m:.0f}m, skipped")
elif lod == LODLevel.SIMPLIFIED:
logger.debug(f"[DOMINANT_PATH] LOD=simplified, dist={distance_m:.0f}m, buildings={len(buildings_to_check)}")
else:
logger.debug(f"[DOMINANT_PATH] LOD=full, dist={distance_m:.0f}m, buildings={len(buildings_to_check)}")
```
### Step 5: Update coverage calculation to use LOD wrapper
**File:** `backend/app/services/coverage_service.py` OR `backend/app/services/parallel_coverage_service.py`
**Find where dominant_path is called and replace with LOD version:**
```python
# BEFORE (find lines like this):
dominant_result = find_dominant_path_vectorized(tx, rx, buildings, ...)
# or
dominant_result = dominant_path_service.find_dominant_paths(...)
# AFTER (replace with):
from app.services.dominant_path_service import find_dominant_path_with_lod
dominant_result = find_dominant_path_with_lod(
tx_lat, tx_lon, tx_height,
rx_lat, rx_lon, rx_height,
frequency_mhz,
buildings,
distance_m=point_distance # Pass pre-calculated distance if available
)
# Use the result
if not dominant_result.get("skipped", False):
total_loss += dominant_result.get("path_loss_db", 0.0)
```
### Step 6: Update worker function (if using parallel processing)
**File:** `backend/app/parallel/worker.py` OR wherever worker calculates points
**Same pattern - use find_dominant_path_with_lod instead of direct calls.**
---
## Testing Checklist
- [ ] LODLevel enum imports correctly
- [ ] get_lod_level(4000) returns LODLevel.NONE
- [ ] get_lod_level(2000) returns LODLevel.SIMPLIFIED
- [ ] get_lod_level(1000) returns LODLevel.FULL
- [ ] Detailed preset completes without timeout
- [ ] Detailed preset time < 90 seconds (target: ~33s)
- [ ] Standard preset still works (regression check)
- [ ] Logs show LOD decisions: "LOD=none", "LOD=simplified", "LOD=full"
- [ ] Coverage map looks reasonable (no obvious artifacts at LOD boundaries)
---
## Build & Deploy
```powershell
# Backend
cd D:\root\rfcp\backend
pip install -e .
# Test
cd D:\root\rfcp\installer
.\test-detailed-quick.bat
# If works, rebuild executable
cd D:\root\rfcp\installer
pyinstaller rfcp-server.spec --clean
```
---
## Commit Message
```
feat(backend): add LOD optimization for dominant_path (v3.1.0)
- Add LODLevel enum (NONE, SIMPLIFIED, FULL)
- Add distance thresholds: >3km skip, 1.5-3km simplified, <1.5km full
- Create find_dominant_path_with_lod() wrapper
- Update coverage calculation to use LOD
- Expected: 8x speedup for Detailed preset (262s -> ~33s)
Phase 3.1.0: Performance Optimization
```
---
## Success Criteria
1. **Performance:** Detailed preset completes in <90 seconds (target ~33s)
2. **No regression:** Standard preset still works, same speed
3. **Logging:** Can see LOD level in server output
4. **Quality:** Coverage map visually acceptable (no obvious LOD boundary artifacts)
---
## Notes for Claude Code
- The existing codebase has multiple dominant_path functions - find the one actually being used
- Check both `coverage_service.py` and `parallel_coverage_service.py`
- Worker processes may have their own copy of the function - update those too
- If `find_dominant_path_vectorized` doesn't exist as standalone function, look for it in a class
- haversine_distance might be in TerrainService or as standalone function - check imports
- Building geometry format varies - handle both `[[lon,lat],...]` and `[{lon:...,lat:...},...]`
---
*"Not all points are created equal - distant ones deserve less attention"*

View File

@@ -0,0 +1,633 @@
# RFCP - Iteration 3.2.0: Comprehensive Performance & Bug Fixes
## Overview
Major iteration combining performance optimizations, UI fixes, and bug resolutions. This addresses the Detailed preset timeout, stuck progress bar, app close issues, region data problems, and UX improvements.
**Scope:** Backend optimizations + Frontend fixes + Electron fixes + Data validation
---
## Part 1: Performance Optimizations
### 1.1 Adaptive Resolution (CRITICAL)
**Problem:** 10km radius with 200m resolution = ~7850 points → timeout
**Solution:** Distance-based adaptive resolution
**File:** `backend/app/services/coverage_service.py` (or grid generation code)
```python
def get_adaptive_resolution(base_resolution: float, distance_from_tx: float) -> float:
    """Pick a grid resolution for a point from its distance to the TX.

    Near the transmitter the user's chosen resolution is honored
    (details matter there); farther out a coarser floor is imposed,
    since a macro view is sufficient.
    """
    if distance_from_tx < 2000:       # inner zone (<2 km): user's choice
        return base_resolution
    if distance_from_tx < 5000:       # middle zone (2-5 km): at least 300 m
        return max(base_resolution, 300)
    return max(base_resolution, 500)  # outer zone (>5 km): at least 500 m
def generate_adaptive_grid(center_lat, center_lon, radius_m, base_resolution):
    """Build a coverage grid from concentric rings of decreasing density.

    Instead of one uniform grid, three zones are generated:
    <2 km at the user's resolution, 2-5 km at >=300 m spacing, and
    beyond 5 km at >=500 m spacing.
    """
    # Zone 1: inner ring (<2 km) — full, user-chosen resolution.
    points = list(generate_grid_ring(center_lat, center_lon, 0, 2000, base_resolution))

    # Zone 2: middle ring (2-5 km) — medium resolution floor.
    if radius_m > 2000:
        points.extend(
            generate_grid_ring(center_lat, center_lon, 2000, min(5000, radius_m),
                               max(base_resolution, 300))
        )

    # Zone 3: outer ring (>5 km) — coarse resolution floor.
    if radius_m > 5000:
        points.extend(
            generate_grid_ring(center_lat, center_lon, 5000, radius_m,
                               max(base_resolution, 500))
        )

    return points
```
**Expected result:**
- 10km with 200m base: ~7850 → ~2500 points (3x reduction)
- Combined with LOD: 10km detailed should complete in ~60s
### 1.2 Radial Preview Mode (NEW FEATURE)
**Purpose:** Instant preview using 360 radial spokes instead of full grid
**File:** `backend/app/services/coverage_service.py`
```python
def calculate_radial_preview(
    tx_lat: float, tx_lon: float, tx_height: float,
    radius_m: float, frequency_mhz: float,
    num_spokes: int = 360,        # 1-degree angular resolution
    points_per_spoke: int = 50,
    tx_power_dbm: float = 43.0,   # was read as an undefined global (NameError)
) -> List[dict]:
    """Fast radial preview: sample spokes from the TX instead of a grid.

    Much faster than a full grid because terrain profiles can be cached
    per spoke, no building calculations are done (terrain only), and
    values between samples can be linearly interpolated.

    Args:
        tx_lat, tx_lon, tx_height: Transmitter position.
        radius_m: Preview radius in meters.
        frequency_mhz: Operating frequency.
        num_spokes: Number of radial lines (360 -> 1-degree steps).
        points_per_spoke: Samples along each spoke.
        tx_power_dbm: TX power used to convert path loss into RSRP.
            Previously an undefined global; now an explicit parameter
            with a default so existing callers keep working.

    Returns:
        List of dicts with lat/lon/rsrp/distance/angle per sample.
    """
    results = []
    step_m = radius_m / points_per_spoke  # hoisted loop-invariant
    for angle_deg in range(num_spokes):
        for i in range(points_per_spoke):
            distance = (i + 1) * step_m
            # NOTE(review): move_point is called with a bearing in DEGREES;
            # the original also computed an unused angle_rad (removed here).
            # Confirm move_point's expected unit.
            rx_lat, rx_lon = move_point(tx_lat, tx_lon, distance, angle_deg)
            # Simple terrain-only calculation (no buildings).
            path_loss = calculate_terrain_path_loss(
                tx_lat, tx_lon, tx_height,
                rx_lat, rx_lon, 1.5,  # standard UE height
                frequency_mhz
            )
            results.append({
                'lat': rx_lat,
                'lon': rx_lon,
                'rsrp': tx_power_dbm - path_loss,
                'distance': distance,
                'angle': angle_deg,
            })
    return results
```
**Add to API:** New endpoint or parameter `?mode=preview`
---
## Part 2: Bug Fixes
### 2.1 Progress Bar Stuck at "Initializing 5%" (CRITICAL)
**Problem:** Progress never updates past 5%
**Root Cause:** WebSocket messages not reaching frontend OR React state not updating
**Debug & Fix Steps:**
**Step 1: Backend - Verify messages are sent**
File: `backend/app/api/websocket.py` or `backend/app/services/coverage_service.py`
```python
import logging
logger = logging.getLogger(__name__)
async def send_progress(websocket, progress: int, status: str):
    """Push one progress update over the websocket, with a log trace."""
    payload = {"type": "progress", "progress": progress, "status": status}
    logger.info(f"[WS] Sending progress: {progress}% - {status}")
    await websocket.send_json(payload)
    # Yield control so the send is flushed before heavy work resumes.
    await asyncio.sleep(0)
```
**Step 2: Frontend - Check WebSocket handling**
File: `frontend/src/services/websocket.ts` or similar
```typescript
// Add logging
socket.onmessage = (event) => {
const data = JSON.parse(event.data);
console.log('[WS] Received:', data);
if (data.type === 'progress') {
console.log('[WS] Progress update:', data.progress, data.status);
// Update store
setCoverageProgress(data.progress, data.status);
}
};
```
**Step 3: Frontend - Check React state update**
File: `frontend/src/store/coverage.ts` or state management
```typescript
// Ensure state updates trigger re-render
setCoverageProgress: (progress: number, status: string) => {
console.log('[Store] Setting progress:', progress, status);
set({
progress: progress, // Must be new value, not mutation
progressStatus: status
});
}
```
**Step 4: Frontend - Check component subscription**
File: `frontend/src/App.tsx` or progress display component
```typescript
// Ensure component subscribes to store changes
const progress = useCoverageStore((state) => state.progress);
const progressStatus = useCoverageStore((state) => state.progressStatus);
useEffect(() => {
console.log('[UI] Progress changed:', progress, progressStatus);
}, [progress, progressStatus]);
```
### 2.2 App Close Button Broken (Electron)
**Problem:** Clicking X kills backend but Electron window stays open
**File:** `desktop/main.js`
```javascript
const { app, BrowserWindow } = require('electron');
const { spawn } = require('child_process');
// Module-level handles shared by the lifecycle callbacks below.
let mainWindow;
let backendProcess;
/**
 * Create the main application window and wire up close handling so the
 * spawned backend process is terminated before the window is destroyed.
 */
function createWindow() {
mainWindow = new BrowserWindow({
// ... existing config
});
// Handle window close
mainWindow.on('close', async (event) => {
event.preventDefault(); // Prevent immediate close
console.log('[Electron] Window closing, cleaning up...');
// Kill backend process
if (backendProcess) {
console.log('[Electron] Killing backend process...');
backendProcess.kill('SIGTERM');
// Wait for graceful shutdown
await new Promise(resolve => setTimeout(resolve, 1000));
// Force kill if still running
// NOTE(review): `killed` is set once a signal was successfully SENT,
// not when the process has exited — confirm the backend actually
// terminated (e.g. listen for the 'exit' event) if this matters.
if (!backendProcess.killed) {
backendProcess.kill('SIGKILL');
}
}
// Now actually close — destroy() bypasses this 'close' handler.
mainWindow.destroy();
});
mainWindow.on('closed', () => {
mainWindow = null;
});
}
// Ensure app quits when all windows closed
app.on('window-all-closed', () => {
console.log('[Electron] All windows closed, quitting app');
app.quit();
});
// Cleanup on app quit — last-chance hard kill of the backend.
app.on('before-quit', () => {
console.log('[Electron] App quitting, final cleanup');
if (backendProcess && !backendProcess.killed) {
backendProcess.kill('SIGKILL');
}
});
```
### 2.3 Memory Leak (1328 MB not released)
**Problem:** Memory not freed after calculation
**File:** `backend/app/services/parallel_coverage_service.py` or `backend/app/parallel/manager.py`
```python
import gc
from multiprocessing import shared_memory
class SharedMemoryManager:
    """Tracks shared-memory blocks so they can be released deterministically."""

    def __init__(self):
        # Blocks created by this manager; released in cleanup().
        self._shared_blocks = []

    def create_shared_block(self, name, size):
        """Create and register a named shared-memory block of `size` bytes."""
        shm = shared_memory.SharedMemory(name=name, create=True, size=size)
        self._shared_blocks.append(shm)
        return shm

    def cleanup(self):
        """Explicitly close and unlink all tracked blocks, then force a GC pass.

        close() and unlink() are attempted independently: in the original
        code a close() failure skipped unlink() and leaked the OS block.
        """
        for shm in self._shared_blocks:
            try:
                shm.close()
            except Exception as e:
                logger.warning(f"Error cleaning up shared memory: {e}")
            try:
                shm.unlink()  # Important! Actually frees the memory
            except Exception as e:
                logger.warning(f"Error cleaning up shared memory: {e}")
        self._shared_blocks.clear()
        # Force garbage collection so large numpy views are dropped promptly.
        gc.collect()
# In coverage calculation:
async def calculate_coverage(*args, **kwargs):
    """Coverage entry-point sketch: shared memory is ALWAYS reclaimed.

    (`*args, **kwargs` stand in for the real signature — the original
    `(...)` placeholder is a SyntaxError in Python.)
    """
    shm_manager = SharedMemoryManager()
    try:
        # ... calculation code
        pass
    finally:
        # ALWAYS cleanup, even on error/timeout
        shm_manager.cleanup()
        logger.info("[MEMORY] Shared memory cleaned up")
```
### 2.4 Region Data / Map Cache Issues
**Problem:** Western Ukraine region hangs or produces "No coverage points found"
**Diagnosis steps:**
**Step 1: Check ProgramData folder structure**
```python
# Add diagnostic endpoint or startup check
import os
def diagnose_data_folders():
    """Report on candidate RFCP data folders.

    For every location that exists, records its file list, total size of
    regular files in MB, and counts of terrain tiles (*.hgt) and OSM
    cache files when those subfolders are present.
    """
    candidate_dirs = [
        os.path.expandvars(r'%PROGRAMDATA%\RFCP'),
        os.path.expandvars(r'%APPDATA%\RFCP'),
        os.path.expandvars(r'%LOCALAPPDATA%\RFCP'),
        './data',
        '../data',
    ]
    report = {}
    for folder in candidate_dirs:
        if not os.path.exists(folder):
            continue
        entries = os.listdir(folder)
        total_bytes = sum(
            os.path.getsize(os.path.join(folder, name))
            for name in entries
            if os.path.isfile(os.path.join(folder, name))
        )
        report[folder] = {
            'exists': True,
            'files': entries,
            'size_mb': total_bytes / 1024 / 1024,
        }
        # Terrain tiles live in a 'terrain' subfolder as .hgt files.
        terrain_dir = os.path.join(folder, 'terrain')
        if os.path.exists(terrain_dir):
            report[folder]['terrain_tiles'] = sum(
                1 for name in os.listdir(terrain_dir) if name.endswith('.hgt')
            )
        # OSM responses are cached under 'osm_cache'.
        osm_dir = os.path.join(folder, 'osm_cache')
        if os.path.exists(osm_dir):
            report[folder]['osm_cache_files'] = len(os.listdir(osm_dir))
    return report
```
**Step 2: Validate terrain tiles**
```python
def validate_terrain_tile(filepath: str) -> dict:
    """Validate an SRTM .hgt terrain tile by its exact file size.

    Args:
        filepath: Path to the tile file.

    Returns:
        dict with keys: path, exists, valid, error, and 'type'
        ('SRTM1' or 'SRTM3') when the size matches a known format.
    """
    # Removed the unused `import struct` — validation is size-based only.
    result = {
        'path': filepath,
        'exists': os.path.exists(filepath),
        'valid': False,
        'error': None
    }
    if not result['exists']:
        result['error'] = 'File not found'
        return result
    try:
        size = os.path.getsize(filepath)
        # SRTM1 (1 arc-second): 3601x3601x2 = 25,934,402 bytes
        # SRTM3 (3 arc-second): 1201x1201x2 = 2,884,802 bytes
        if size == 25934402:
            result['type'] = 'SRTM1'
            result['valid'] = True
        elif size == 2884802:
            result['type'] = 'SRTM3'
            result['valid'] = True
        else:
            result['error'] = f'Unexpected size: {size} bytes'
    except OSError as e:
        # Narrowed from `except Exception` — only I/O can fail here.
        result['error'] = str(e)
    return result
```
**Step 3: Fix OSM cache for regions**
```python
def get_osm_cache_key(bbox: tuple, data_type: str) -> str:
    """Build a stable cache filename for an OSM bbox query.

    Coordinates are rounded to 4 decimal places (~11 m) so tiny
    floating-point differences map to the same cache entry.
    """
    lat_min, lon_min, lat_max, lon_max = (round(v, 4) for v in bbox)
    return f"{data_type}_{lat_min}_{lon_min}_{lat_max}_{lon_max}.json"
def validate_osm_cache(cache_path: str) -> bool:
    """Return True if a cached OSM file parses and looks structurally valid.

    Accepts either the Overpass dict form ({'elements': [...]}) or a raw
    list of elements. Any I/O or parse failure marks the cache invalid.
    """
    try:
        with open(cache_path, 'r') as f:
            data = json.load(f)
    except (OSError, ValueError):
        # Was a bare `except:`; JSONDecodeError is a ValueError subclass.
        return False
    if isinstance(data, list):
        # Bug fix: the original checked `isinstance(data, dict)` first,
        # so a JSON list could never validate even though the second
        # condition clearly intended to allow it.
        return True
    return isinstance(data, dict) and 'elements' in data
```
---
## Part 3: UI Fixes
### 3.1 Calculate Button Position
**Problem:** Button overlaps with scrollbar
**File:** `frontend/src/components/CoverageSettings.tsx` or similar
```tsx
// Move Calculate button outside scrollable area
// Or add right margin
<div className="coverage-settings-panel">
<div className="scrollable-content">
{/* All settings */}
</div>
<div className="fixed-footer" style={{
padding: '16px',
borderTop: '1px solid var(--border-color)',
marginRight: '16px' // Space for scrollbar
}}>
<button
className="calculate-button"
onClick={handleCalculate}
style={{ width: '100%' }}
>
Calculate Coverage
</button>
</div>
</div>
```
### 3.2 Site Drag - Move Sectors Together
**Problem:** Dragging site doesn't move its sectors
**File:** `frontend/src/components/map/SiteMarker.tsx` or site handling code
```typescript
// Move a site to a new position and translate all of its sectors by the
// same delta, so sectors stay attached to their parent site.
const handleSiteDrag = (siteId: string, newLat: number, newLon: number) => {
const site = getSite(siteId);
if (!site) return; // unknown site id — nothing to move
// Calculate delta
const deltaLat = newLat - site.lat;
const deltaLon = newLon - site.lon;
// Update site position
updateSite(siteId, { lat: newLat, lon: newLon });
// Update all sectors of this site
const sectors = getSectorsForSite(siteId);
sectors.forEach(sector => {
updateSector(sector.id, {
lat: sector.lat + deltaLat,
lon: sector.lon + deltaLon
});
});
};
```
### 3.3 Site Delete - Remove Sectors Together
**Problem:** Deleting site doesn't remove its sectors
**File:** `frontend/src/store/sites.ts` or site management
```typescript
// Delete a site and cascade the deletion to every sector that belongs to
// it, so no orphan sectors remain in the store.
const deleteSite = (siteId: string) => {
// First, delete all sectors belonging to this site
const sectors = get().sectors.filter(s => s.siteId === siteId);
sectors.forEach(sector => {
deleteSector(sector.id);
});
// Then delete the site
set(state => ({
sites: state.sites.filter(s => s.id !== siteId)
}));
};
```
---
## Part 4: Testing Checklist
### Performance Tests
- [ ] 5km Standard: < 10 seconds
- [ ] 5km Detailed: < 60 seconds
- [ ] 10km Standard: < 30 seconds
- [ ] 10km Detailed: < 120 seconds (was timeout)
- [ ] Radial preview (any radius): < 5 seconds
### Bug Fix Tests
- [ ] Progress bar updates from 5% → 100%
- [ ] App closes completely when clicking X
- [ ] Memory returns to baseline after calculation
- [ ] Western Ukraine region calculates successfully
- [ ] All terrain tiles validate correctly
- [ ] OSM cache files are valid JSON
### UI Tests
- [ ] Calculate button not overlapping scrollbar
- [ ] Dragging site moves all its sectors
- [ ] Deleting site removes all its sectors
- [ ] No console errors in browser DevTools
### Data Validation
- [ ] Run `diagnose_data_folders()` - check output
- [ ] Validate all terrain tiles in cache
- [ ] Validate all OSM cache files
- [ ] Check for corrupted files and remove them
---
## Build & Deploy
```powershell
# Backend
cd D:\root\rfcp\backend
pip install -e .
# Frontend
cd D:\root\rfcp\frontend
npm run build
# Electron
cd D:\root\rfcp\desktop
npm run build
# Full test
cd D:\root\rfcp\installer
.\test-detailed-quick.bat
# If works, rebuild installer
pyinstaller rfcp-server.spec --clean
```
---
## Commit Message
```
feat: Iteration 3.2.0 - Performance & Bug Fixes
Performance:
- Add adaptive resolution (distance-based grid density)
- Add radial preview mode (360 spokes, instant feedback)
- Expected: 10km Detailed ~60s (was timeout)
Bug Fixes:
- Fix progress bar stuck at 5% (WebSocket + React state)
- Fix app close button (Electron lifecycle)
- Fix memory leak (SharedMemory cleanup)
- Fix region data issues (cache validation)
UI Improvements:
- Move Calculate button (scrollbar overlap)
- Site drag moves all sectors
- Site delete removes all sectors
Data Validation:
- Add terrain tile validation
- Add OSM cache validation
- Add diagnostic reporting
```
---
## Success Criteria
1. **10km Detailed completes** without timeout (~60-120s acceptable)
2. **Progress bar works** - shows actual progress 5% → 100%
3. **App closes cleanly** - no orphan processes
4. **Memory released** - returns to baseline after calculation
5. **All regions work** - Western Ukraine calculates successfully
6. **Site management** - drag/delete affects sectors correctly
---
## Priority Order for Implementation
1. **Adaptive Resolution** - biggest performance impact
2. **Progress bar fix** - critical UX issue
3. **App close fix** - annoying bug
4. **Site drag/delete sectors** - quick win
5. **Calculate button position** - quick win
6. **Memory leak** - important but complex
7. **Region data validation** - diagnostic + fix
8. **Radial preview** - nice to have
---
## Notes for Claude Code
- This is a large iteration - take it step by step
- Test after each major change
- Backend and frontend changes may need coordination
- Electron changes require rebuild of desktop app
- Data validation can be added as debug endpoint first
- If stuck on one issue, move to next and come back
---
*"Big iteration, big impact. Let's make RFCP production-ready!"* 🚀

View File

@@ -44,7 +44,10 @@ from app.services.terrain_service import terrain_service, TerrainService
from app.services.los_service import los_service
from app.services.buildings_service import buildings_service, Building
from app.services.materials_service import materials_service
from app.services.dominant_path_service import dominant_path_service, find_dominant_paths_vectorized
from app.services.dominant_path_service import (
dominant_path_service, find_dominant_paths_vectorized,
get_lod_level, LODLevel, SIMPLIFIED_MAX_BUILDINGS,
)
from app.services.street_canyon_service import street_canyon_service, Street
from app.services.reflection_service import reflection_service
from app.services.spatial_index import get_spatial_index, SpatialIndex
@@ -619,12 +622,25 @@ class CoverageService:
_clog(f" Tiles in memory: {len(self.terrain._tile_cache)}")
if any(isinstance(v, (int, float)) and v > 0.001 for v in timing.values()):
_clog("=== PER-STEP BREAKDOWN ===")
lod_keys = {"lod_none", "lod_simplified", "lod_full"}
for step, dt in timing.items():
if step in lod_keys:
continue # Print LOD stats separately
if isinstance(dt, (int, float)) and dt > 0.001:
_clog(f" {step:20s} {dt:.3f}s "
f"({dt/max(1,len(grid))*1000:.2f}ms/point)")
elif not isinstance(dt, (int, float)):
_clog(f" {step:20s} {dt}")
# LOD stats
lod_none = timing.get("lod_none", 0)
lod_simp = timing.get("lod_simplified", 0)
lod_full = timing.get("lod_full", 0)
lod_total = lod_none + lod_simp + lod_full
if lod_total > 0:
_clog(f"=== LOD BREAKDOWN ({lod_total} points with dominant_path) ===")
_clog(f" LOD_NONE (>3km) {lod_none:5d} points ({lod_none*100//lod_total}%) — skipped")
_clog(f" LOD_SIMPLIFIED {lod_simp:5d} points ({lod_simp*100//lod_total}%) — {SIMPLIFIED_MAX_BUILDINGS} buildings max")
_clog(f" LOD_FULL (<1.5km) {lod_full:5d} points ({lod_full*100//lod_total}%) — full calculation")
return points
@@ -834,31 +850,51 @@ class CoverageService:
break
timing["buildings"] += time.time() - t0
# Dominant path (vectorized NumPy) — replaces loop-based sync version
# Dominant path (vectorized NumPy) with LOD optimization
# Only enter when there are actual buildings (spatial_idx with data OR non-empty list)
has_building_data = nearby_buildings or (spatial_idx is not None and spatial_idx._grid)
if settings.use_dominant_path and has_building_data:
t0 = time.time()
try:
dominant = find_dominant_paths_vectorized(
site.lat, site.lon, site.height,
lat, lon, 1.5,
site.frequency, nearby_buildings,
spatial_idx=spatial_idx,
)
if dominant['path_type'] == 'direct':
has_los = True
building_loss = 0.0
elif dominant['path_type'] == 'reflection':
building_loss = max(0.0, building_loss - (10.0 - dominant['total_loss']))
has_los = False
elif dominant['path_type'] == 'diffraction':
if dominant['total_loss'] > building_loss:
building_loss = dominant['total_loss']
has_los = False
except Exception:
pass # Skip dominant path on error — use base model
timing["dominant_path"] += time.time() - t0
lod = get_lod_level(distance)
# LOD_NONE: skip dominant path entirely for distant points (>3km)
if lod == LODLevel.NONE:
timing.setdefault("lod_none", 0)
timing["lod_none"] += 1
else:
t0 = time.time()
try:
# LOD_SIMPLIFIED: limit buildings for mid-range points (1.5-3km)
dp_buildings = nearby_buildings
dp_spatial = spatial_idx
if lod == LODLevel.SIMPLIFIED:
timing.setdefault("lod_simplified", 0)
timing["lod_simplified"] += 1
if len(nearby_buildings) > SIMPLIFIED_MAX_BUILDINGS:
dp_buildings = nearby_buildings[:SIMPLIFIED_MAX_BUILDINGS]
dp_spatial = None # Skip spatial queries, use filtered list only
else:
timing.setdefault("lod_full", 0)
timing["lod_full"] += 1
dominant = find_dominant_paths_vectorized(
site.lat, site.lon, site.height,
lat, lon, 1.5,
site.frequency, dp_buildings,
spatial_idx=dp_spatial,
)
if dominant['path_type'] == 'direct':
has_los = True
building_loss = 0.0
elif dominant['path_type'] == 'reflection':
building_loss = max(0.0, building_loss - (10.0 - dominant['total_loss']))
has_los = False
elif dominant['path_type'] == 'diffraction':
if dominant['total_loss'] > building_loss:
building_loss = dominant['total_loss']
has_los = False
except Exception:
pass # Skip dominant path on error — use base model
timing["dominant_path"] += time.time() - t0
# Street canyon (sync)
if settings.use_street_canyon and streets:

View File

@@ -1,5 +1,6 @@
import time
import numpy as np
from enum import Enum
from typing import List, Tuple, Optional, Dict, Any, TYPE_CHECKING
from dataclasses import dataclass
from app.services.terrain_service import terrain_service
@@ -15,6 +16,38 @@ if TYPE_CHECKING:
from app.services.spatial_index import SpatialIndex
# ── Level of Detail (LOD) for dominant path calculations ──
class LODLevel(Enum):
    """Distance-based level of detail for dominant path analysis.

    At long distances, building-level multipath contributes minimally
    to path loss — macro propagation models suffice.
    """
    NONE = "none"              # Skip dominant path entirely
    SIMPLIFIED = "simplified"  # Check only nearest few buildings
    FULL = "full"              # Full calculation (current behavior)


# LOD distance thresholds (meters)
LOD_THRESHOLD_NONE = 3000        # >3km: skip dominant path
LOD_THRESHOLD_SIMPLIFIED = 1500  # 1.5-3km: simplified mode

# Simplified mode limits
SIMPLIFIED_MAX_BUILDINGS = 5
SIMPLIFIED_MAX_WALLS = 50


def get_lod_level(distance_m: float) -> LODLevel:
    """Map a TX-RX separation in meters onto a LODLevel."""
    if distance_m > LOD_THRESHOLD_NONE:
        return LODLevel.NONE
    if distance_m > LOD_THRESHOLD_SIMPLIFIED:
        return LODLevel.SIMPLIFIED
    return LODLevel.FULL
@dataclass
class RayPath:
"""Single ray path from TX to RX"""