@mytec: before 3.0 REFACTOR

This commit is contained in:
2026-02-01 14:26:17 +02:00
parent acc90fe538
commit 1dde56705a
13 changed files with 1759 additions and 61 deletions

View File

@@ -44,7 +44,7 @@ from app.services.terrain_service import terrain_service, TerrainService
from app.services.los_service import los_service
from app.services.buildings_service import buildings_service, Building
from app.services.materials_service import materials_service
from app.services.dominant_path_service import dominant_path_service
from app.services.dominant_path_service import dominant_path_service, find_dominant_paths_vectorized
from app.services.street_canyon_service import street_canyon_service, Street
from app.services.reflection_service import reflection_service
from app.services.spatial_index import get_spatial_index, SpatialIndex
@@ -648,22 +648,28 @@ class CoverageService:
break
timing["buildings"] += time.time() - t0
# Dominant path (sync) — uses spatial index for O(1) building lookups
# Dominant path (vectorized NumPy) — replaces loop-based sync version
if settings.use_dominant_path and (spatial_idx or nearby_buildings):
t0 = time.time()
paths = dominant_path_service.find_dominant_paths_sync(
dominant = find_dominant_paths_vectorized(
site.lat, site.lon, site.height,
lat, lon, 1.5,
site.frequency, nearby_buildings,
spatial_idx=spatial_idx
spatial_idx=spatial_idx,
)
if paths:
best_path = paths[0]
if best_path.is_valid and best_path.path_loss < (path_loss + terrain_loss + building_loss):
path_loss = best_path.path_loss
terrain_loss = 0
building_loss = 0
has_los = best_path.path_type == "direct" and not best_path.materials_crossed
if dominant['path_type'] == 'direct':
# Direct LOS confirmed by vectorized check
has_los = True
building_loss = 0.0
elif dominant['path_type'] == 'reflection':
# Reflection path bypasses buildings — reduce building loss
building_loss = max(0.0, building_loss - (10.0 - dominant['total_loss']))
has_los = False
elif dominant['path_type'] == 'diffraction':
# Diffraction: use estimated loss if worse than current
if dominant['total_loss'] > building_loss:
building_loss = dominant['total_loss']
has_los = False
timing["dominant_path"] += time.time() - t0
# Street canyon (sync)

View File

@@ -1,10 +1,15 @@
import time
import numpy as np
from typing import List, Tuple, Optional, TYPE_CHECKING
from typing import List, Tuple, Optional, Dict, Any, TYPE_CHECKING
from dataclasses import dataclass
from app.services.terrain_service import terrain_service
from app.services.buildings_service import buildings_service, Building
from app.services.materials_service import materials_service, BuildingMaterial
from app.services.geometry_vectorized import (
points_to_local_coords,
line_intersects_polygons_batch,
find_best_reflection_path_vectorized,
)
if TYPE_CHECKING:
from app.services.spatial_index import SpatialIndex
@@ -21,9 +26,9 @@ class RayPath:
is_valid: bool # Does this path exist?
MAX_BUILDINGS_FOR_LINE = 50
MAX_BUILDINGS_FOR_REFLECTION = 30
MAX_DISTANCE_FROM_PATH = 300 # meters
MAX_BUILDINGS_FOR_LINE = 30
MAX_BUILDINGS_FOR_REFLECTION = 20
MAX_DISTANCE_FROM_PATH = 200 # meters
def _filter_buildings_by_distance(buildings, tx_point, rx_point, max_count=100, max_distance=500):
@@ -60,6 +65,204 @@ def _filter_buildings_by_distance(buildings, tx_point, rx_point, max_count=100,
return filtered[:max_count]
# ── Vectorized dominant path (NumPy) ──
_vec_log_count = 0
def _buildings_to_arrays(buildings: List[Building], ref_lat: float, ref_lon: float):
    """Flatten Building footprints into NumPy arrays for vectorized geometry.

    Degenerate footprints (missing geometry or fewer than 3 vertices)
    contribute a zero entry to ``poly_lengths`` and no walls/vertices, so
    polygon indices stay aligned with the input building list.

    Returns:
        walls_start: (W, 2) wall start points in local XY meters
        walls_end: (W, 2) wall end points in local XY meters
        wall_to_building: (W,) mapping wall index -> building index
        poly_x: flattened polygon X coords
        poly_y: flattened polygon Y coords
        poly_lengths: (num_polygons,) vertices per polygon
    """
    wall_starts = []
    wall_ends = []
    wall_owner = []
    flat_x = []
    flat_y = []
    vertex_counts = []

    for bldg_idx, bldg in enumerate(buildings):
        ring = bldg.geometry  # [[lon, lat], ...]
        if not ring or len(ring) < 3:
            # Placeholder keeps this building's polygon slot in the output.
            vertex_counts.append(0)
            continue
        lats = np.array([vertex[1] for vertex in ring])
        lons = np.array([vertex[0] for vertex in ring])
        xs, ys = points_to_local_coords(ref_lat, ref_lon, lats, lons)
        flat_x.extend(xs)
        flat_y.extend(ys)
        vertex_counts.append(len(ring))
        # Consecutive vertex pairs become wall segments (ring assumed closed,
        # i.e. last vertex repeats the first — TODO confirm upstream contract).
        for x0, y0, x1, y1 in zip(xs[:-1], ys[:-1], xs[1:], ys[1:]):
            wall_starts.append([x0, y0])
            wall_ends.append([x1, y1])
            wall_owner.append(bldg_idx)

    return (
        np.array(wall_starts) if wall_starts else np.zeros((0, 2)),
        np.array(wall_ends) if wall_ends else np.zeros((0, 2)),
        np.array(wall_owner, dtype=int) if wall_owner else np.zeros(0, dtype=int),
        np.array(flat_x) if flat_x else np.zeros(0),
        np.array(flat_y) if flat_y else np.zeros(0),
        np.array(vertex_counts, dtype=int),
    )
def find_dominant_paths_vectorized(
    tx_lat: float, tx_lon: float, tx_height: float,
    rx_lat: float, rx_lon: float, rx_height: float,
    frequency_mhz: float,
    buildings: List[Building],
    spatial_idx: 'Optional[SpatialIndex]' = None,
) -> Dict[str, Any]:
    """Vectorized dominant path finding using NumPy batch operations.

    Replaces the loop-based find_dominant_paths_sync() with:
    1. Batch building-to-array conversion
    2. Vectorized LOS polygon intersection check
    3. Vectorized reflection point calculation
    4. Simplified diffraction estimate

    NOTE(review): tx_height, rx_height and frequency_mhz are accepted for
    signature parity with the sync version but are not used below —
    confirm this is intentional.

    Returns dict with:
        has_los, path_type ('direct'|'reflection'|'diffraction'),
        total_loss (dB), path_length (meters),
        reflection_point ((lat, lon) or None)
    """
    # Module-level counter used only to rate-limit the diagnostic print.
    global _vec_log_count
    # Get nearby buildings via spatial index (same filtering as sync version)
    if spatial_idx:
        line_buildings = spatial_idx.query_line(tx_lat, tx_lon, rx_lat, rx_lon)
    else:
        line_buildings = buildings
    line_buildings = _filter_buildings_by_distance(
        line_buildings,
        (tx_lat, tx_lon), (rx_lat, rx_lon),
        max_count=MAX_BUILDINGS_FOR_LINE,
        max_distance=MAX_DISTANCE_FROM_PATH,
    )
    # Reference point for local coordinate system (midpoint of TX-RX)
    ref_lat = (tx_lat + rx_lat) / 2
    ref_lon = (tx_lon + rx_lon) / 2
    # Convert TX/RX to local meters (arrays of length 1; unwrap scalars)
    tx_xy = points_to_local_coords(ref_lat, ref_lon, np.array([tx_lat]), np.array([tx_lon]))
    rx_xy = points_to_local_coords(ref_lat, ref_lon, np.array([rx_lat]), np.array([rx_lon]))
    tx = np.array([tx_xy[0][0], tx_xy[1][0]])
    rx = np.array([rx_xy[0][0], rx_xy[1][0]])
    direct_dist = np.linalg.norm(rx - tx)
    # Convert buildings to flat arrays (walls + polygon vertex buffers).
    # NOTE(review): wall_to_bldg is unused in this function.
    walls_start, walls_end, wall_to_bldg, poly_x, poly_y, poly_lengths = (
        _buildings_to_arrays(line_buildings, ref_lat, ref_lon)
    )
    # Diagnostic log for first few points only (avoids log flooding per-pixel)
    _vec_log_count += 1
    if _vec_log_count <= 3:
        print(
            f"[DOMINANT_PATH_VEC] Point #{_vec_log_count}: "
            f"buildings={len(line_buildings)}, walls={len(walls_start)}, "
            f"dist={direct_dist:.0f}m",
            flush=True,
        )
    # No (non-degenerate) buildings → direct LOS with zero extra loss
    if len(poly_lengths) == 0 or np.all(poly_lengths < 3):
        return {
            'has_los': True,
            'path_type': 'direct',
            'total_loss': 0.0,
            'path_length': direct_dist,
            'reflection_point': None,
        }
    # Step 1: Vectorized direct LOS check — any polygon edge crossing blocks LOS
    intersects, _ = line_intersects_polygons_batch(tx, rx, poly_x, poly_y, poly_lengths)
    if not np.any(intersects):
        return {
            'has_los': True,
            'path_type': 'direct',
            'total_loss': 0.0,
            'path_length': direct_dist,
            'reflection_point': None,
        }
    # Step 2: Vectorized reflection path finding
    # Use all line buildings for reflection walls
    if spatial_idx:
        # Widen the candidate set around the path midpoint, then merge with
        # the along-the-line buildings so obstacle checks see both sets.
        mid_lat = (tx_lat + rx_lat) / 2
        mid_lon = (tx_lon + rx_lon) / 2
        refl_buildings = spatial_idx.query_point(mid_lat, mid_lon, buffer_cells=3)
        refl_buildings = _filter_buildings_by_distance(
            refl_buildings,
            (tx_lat, tx_lon), (rx_lat, rx_lon),
            max_count=MAX_BUILDINGS_FOR_REFLECTION,
            max_distance=MAX_DISTANCE_FROM_PATH,
        )
        # Merge line + reflection buildings (deduplicate by id)
        seen_ids = {b.id for b in line_buildings}
        merged = list(line_buildings)
        for b in refl_buildings:
            if b.id not in seen_ids:
                merged.append(b)
                seen_ids.add(b.id)
        r_walls_start, r_walls_end, r_wall_to_bldg, r_poly_x, r_poly_y, r_poly_lengths = (
            _buildings_to_arrays(merged, ref_lat, ref_lon)
        )
    else:
        # Without a spatial index, reuse the already-converted line buildings.
        r_walls_start, r_walls_end, r_wall_to_bldg = walls_start, walls_end, wall_to_bldg
        r_poly_x, r_poly_y, r_poly_lengths = poly_x, poly_y, poly_lengths
    refl_point, refl_length, refl_loss = find_best_reflection_path_vectorized(
        tx, rx,
        r_walls_start, r_walls_end, r_wall_to_bldg,
        r_poly_x, r_poly_y, r_poly_lengths,
        max_candidates=30,
        max_walls=100,
        max_los_checks=10,
    )
    if refl_point is not None:
        # Convert reflection point back to lat/lon — inverse of the
        # equirectangular projection in points_to_local_coords.
        cos_lat = np.cos(np.radians(ref_lat))
        refl_lat = ref_lat + refl_point[1] / 110540.0
        refl_lon = ref_lon + refl_point[0] / (111320.0 * cos_lat)
        return {
            'has_los': False,
            'path_type': 'reflection',
            'total_loss': refl_loss,
            'path_length': refl_length,
            'reflection_point': (refl_lat, refl_lon),
        }
    # Step 3: Diffraction fallback — heuristic loss grows with the number of
    # blocking buildings, capped at 5 (10–35 dB range).
    num_blocking = int(np.sum(intersects))
    diffraction_loss = 10.0 + 5.0 * min(num_blocking, 5)
    return {
        'has_los': False,
        'path_type': 'diffraction',
        'total_loss': diffraction_loss,
        'path_length': direct_dist,
        'reflection_point': None,
    }
class DominantPathService:
"""
Find dominant propagation paths (2-3 strongest)

View File

@@ -0,0 +1,309 @@
"""
Vectorized geometry operations using NumPy.
All functions operate on arrays, not single values.
Provides 10-50x speedup over Python loops for batch geometry checks.
"""
import numpy as np
from typing import Tuple, Optional
EARTH_RADIUS = 6371000  # meters


def haversine_batch(
    lat1: float, lon1: float,
    lats2: np.ndarray, lons2: np.ndarray,
) -> np.ndarray:
    """Great-circle distances (meters) from one origin to many points."""
    phi1 = np.radians(lat1)
    lam1 = np.radians(lon1)
    phi2 = np.radians(lats2)
    lam2 = np.radians(lons2)
    # Haversine formula: hav(c) = hav(dphi) + cos(phi1)cos(phi2)hav(dlam)
    half_dphi = (phi2 - phi1) / 2
    half_dlam = (lam2 - lam1) / 2
    hav = np.sin(half_dphi) ** 2 + np.cos(phi1) * np.cos(phi2) * np.sin(half_dlam) ** 2
    central_angle = 2 * np.arcsin(np.sqrt(hav))
    return EARTH_RADIUS * central_angle
def points_to_local_coords(
    ref_lat: float, ref_lon: float,
    lats: np.ndarray, lons: np.ndarray,
) -> Tuple[np.ndarray, np.ndarray]:
    """Project lat/lon arrays to local X/Y meters around a reference point.

    Equirectangular approximation: accurate for short spans where Earth
    curvature is negligible; longitude is scaled by cos(ref_lat).
    """
    meters_per_deg_lon = 111320.0 * np.cos(np.radians(ref_lat))
    meters_per_deg_lat = 110540.0
    x = (lons - ref_lon) * meters_per_deg_lon
    y = (lats - ref_lat) * meters_per_deg_lat
    return x, y
def line_segments_intersect_batch(
    p1: np.ndarray, p2: np.ndarray,
    segments_start: np.ndarray, segments_end: np.ndarray,
) -> Tuple[np.ndarray, np.ndarray]:
    """Check if segment p1->p2 intersects each of N segments.

    Solves p1 + t*(p2-p1) == start + u*(end-start) per segment using the
    2D cross-product form of Cramer's rule; an intersection exists when
    both parameters fall in [0, 1].

    Args:
        p1, p2: shape (2,) endpoints of the query segment.
        segments_start, segments_end: shape (N, 2) segment endpoints.

    Returns:
        intersects: bool array (N,).
        t_values: parameter along p1->p2 (N,); meaningful only where
            ``intersects`` is True.
    """
    d = p2 - p1
    seg_d = segments_end - segments_start
    # Denominator cross(d, seg_d); near-zero means (anti)parallel lines.
    cross = d[0] * seg_d[:, 1] - d[1] * seg_d[:, 0]
    parallel_mask = np.abs(cross) < 1e-10
    cross_safe = np.where(parallel_mask, 1.0, cross)
    # BUG FIX: the offset must be (start - p1), not (p1 - start). The old
    # sign negated both t and u, so genuine crossings (t, u in [0, 1])
    # landed in [-1, 0] and were never reported.
    dp = segments_start - p1
    t = (dp[:, 0] * seg_d[:, 1] - dp[:, 1] * seg_d[:, 0]) / cross_safe
    u = (dp[:, 0] * d[1] - dp[:, 1] * d[0]) / cross_safe
    intersects = ~parallel_mask & (t >= 0) & (t <= 1) & (u >= 0) & (u <= 1)
    return intersects, t
def line_intersects_polygons_batch(
    p1: np.ndarray, p2: np.ndarray,
    polygons_x: np.ndarray, polygons_y: np.ndarray,
    polygon_lengths: np.ndarray,
    max_polygons: int = 30,
) -> Tuple[np.ndarray, np.ndarray]:
    """Check if line p1->p2 intersects multiple polygons.

    Args:
        p1, p2: shape (2,) segment endpoints in local XY meters.
        polygons_x, polygons_y: flattened vertex arrays (all polygons
            concatenated; ``polygon_lengths`` gives the split points).
        polygon_lengths: vertices per polygon (num_polygons,); entries < 3
            mark degenerate polygons that occupy no vertices in the flat
            arrays and are skipped.
        max_polygons: only check nearest N polygons (bbox pre-filter).

    Returns:
        intersects: bool (num_polygons,)
        min_distances: distance along p1->p2 to the first edge hit per
            polygon (num_polygons,); +inf where there is no hit.
    """
    num_polygons = len(polygon_lengths)
    if num_polygons == 0:
        return np.array([], dtype=bool), np.array([])
    intersects = np.zeros(num_polygons, dtype=bool)
    min_t = np.full(num_polygons, np.inf)
    # Pre-filter: only check polygons whose first vertex is near the line bbox.
    # Heuristic: a large polygon whose FIRST vertex lies outside the buffered
    # bbox is skipped even if other vertices are inside — acceptable for
    # building-sized footprints vs. the 50 m buffer, presumably; verify if
    # polygons can exceed ~100 m.
    if num_polygons > max_polygons:
        buf = 50.0  # 50m buffer
        line_min_x = min(p1[0], p2[0]) - buf
        line_max_x = max(p1[0], p2[0]) + buf
        line_min_y = min(p1[1], p2[1]) - buf
        line_max_y = max(p1[1], p2[1]) + buf
        nearby_mask = np.zeros(num_polygons, dtype=bool)
        # vi walks the flat vertex buffer; advance by each polygon's length
        # whether or not the polygon passed the bbox test.
        vi = 0
        for i, length in enumerate(polygon_lengths):
            if length >= 3:
                cx = polygons_x[vi]
                cy = polygons_y[vi]
                if line_min_x <= cx <= line_max_x and line_min_y <= cy <= line_max_y:
                    nearby_mask[i] = True
            vi += length
        # Cap at max_polygons (keeps the first N in input order, not the nearest N)
        nearby_indices = np.where(nearby_mask)[0]
        if len(nearby_indices) > max_polygons:
            nearby_mask = np.zeros(num_polygons, dtype=bool)
            nearby_mask[nearby_indices[:max_polygons]] = True
    else:
        nearby_mask = np.ones(num_polygons, dtype=bool)
    # Main pass: test the segment against every edge of each surviving polygon.
    # idx walks the same flat vertex buffer as vi above.
    idx = 0
    for i, length in enumerate(polygon_lengths):
        if length < 3 or not nearby_mask[i]:
            idx += length
            continue
        px = polygons_x[idx:idx + length]
        py = polygons_y[idx:idx + length]
        starts = np.stack([px, py], axis=1)
        # np.roll closes the ring: last vertex connects back to the first.
        ends = np.stack([np.roll(px, -1), np.roll(py, -1)], axis=1)
        edge_intersects, t_vals = line_segments_intersect_batch(p1, p2, starts, ends)
        if np.any(edge_intersects):
            intersects[i] = True
            min_t[i] = np.min(t_vals[edge_intersects])
        idx += length
    # Scale the [0, 1] parameter of the nearest hit to meters along the line.
    line_length = np.linalg.norm(p2 - p1)
    min_distances = min_t * line_length
    return intersects, min_distances
def calculate_reflection_points_batch(
    tx: np.ndarray, rx: np.ndarray,
    wall_starts: np.ndarray, wall_ends: np.ndarray,
) -> Tuple[np.ndarray, np.ndarray]:
    """Compute mirror-image reflection points of TX->RX on N walls.

    For each wall, TX is mirrored across the wall's supporting line; the
    candidate reflection point is where the RX -> mirrored-TX line meets
    that wall line. A candidate is valid when it lies within the wall
    segment and TX sits on the positive side of the wall normal
    (left-hand normal of the start->end direction, so validity depends on
    wall winding — presumably consistent upstream; verify).

    Args:
        tx, rx: shape (2,) points in local XY meters.
        wall_starts, wall_ends: shape (N, 2).

    Returns:
        reflection_points: (N, 2) candidates (undefined where invalid).
        valid: bool (N,).
    """
    seg = wall_ends - wall_starts
    seg_len = np.linalg.norm(seg, axis=1, keepdims=True)
    # Unit direction and left-hand normal of every wall.
    unit = seg / np.maximum(seg_len, 1e-10)
    normal = np.stack([-unit[:, 1], unit[:, 0]], axis=1)
    # Signed distance of TX from each wall line, then mirror TX across it.
    signed_dist = np.sum((tx - wall_starts) * normal, axis=1, keepdims=True)
    mirrored_tx = tx - 2 * signed_dist * normal
    # Intersect the RX -> mirrored-TX ray with each wall line (Cramer's rule).
    ray = mirrored_tx - rx
    denom = ray[:, 0] * seg[:, 1] - ray[:, 1] * seg[:, 0]
    nondegenerate = np.abs(denom) > 1e-10
    denom_safe = np.where(nondegenerate, denom, 1.0)
    offset = wall_starts - rx
    t = (offset[:, 0] * ray[:, 1] - offset[:, 1] * ray[:, 0]) / denom_safe
    reflection_points = wall_starts + t[:, np.newaxis] * seg
    # Valid: hit inside the segment and TX on the normal's positive side.
    valid = nondegenerate & (t >= 0) & (t <= 1) & (signed_dist[:, 0] > 0)
    return reflection_points, valid
def find_best_reflection_path_vectorized(
    tx: np.ndarray, rx: np.ndarray,
    building_walls_start: np.ndarray,
    building_walls_end: np.ndarray,
    wall_to_building: np.ndarray,
    obstacle_polygons_x: np.ndarray,
    obstacle_polygons_y: np.ndarray,
    obstacle_lengths: np.ndarray,
    max_candidates: int = 50,
    max_walls: int = 100,
    max_los_checks: int = 10,
) -> Tuple[Optional[np.ndarray], float, float]:
    """Find best single-reflection path using vectorized ops.

    Pipeline: (1) cap walls to the closest ``max_walls`` around the path
    midpoint, (2) compute mirror-image reflection candidates in one batch,
    (3) drop candidates whose path exceeds 2x the direct distance, (4) keep
    the ``max_candidates`` shortest, (5) verify both LOS legs for at most
    ``max_los_checks`` candidates in ascending path-length order and take
    the first unobstructed one.

    NOTE(review): ``wall_to_building`` is sliced alongside the walls but
    never read afterwards — confirm it is still needed in the signature.

    Args:
        tx, rx: shape (2,) endpoints in local XY meters.
        max_walls: Only consider closest N walls for reflection candidates.
        max_los_checks: Only verify LOS for top N shortest reflection paths.

    Returns:
        best_reflection_point: (2,) or None
        best_path_length: meters (np.inf when no path found)
        best_reflection_loss: dB (0.0 when no path found)
    """
    num_walls = len(building_walls_start)
    if num_walls == 0:
        return None, np.inf, 0.0
    # Limit walls by distance to path midpoint (argpartition: O(n) selection,
    # result unordered — fine, we only need the set of closest walls).
    if num_walls > max_walls:
        midpoint = (tx + rx) / 2
        wall_midpoints = (building_walls_start + building_walls_end) / 2
        wall_distances = np.linalg.norm(wall_midpoints - midpoint, axis=1)
        closest = np.argpartition(wall_distances, max_walls)[:max_walls]
        building_walls_start = building_walls_start[closest]
        building_walls_end = building_walls_end[closest]
        wall_to_building = wall_to_building[closest]
    # Batch mirror-image reflection candidates on all remaining walls.
    refl_points, valid = calculate_reflection_points_batch(
        tx, rx, building_walls_start, building_walls_end,
    )
    if not np.any(valid):
        return None, np.inf, 0.0
    valid_indices = np.where(valid)[0]
    valid_refl = refl_points[valid]
    # Total path length = TX->reflection + reflection->RX per candidate.
    tx_to_refl = np.linalg.norm(valid_refl - tx, axis=1)
    refl_to_rx = np.linalg.norm(rx - valid_refl, axis=1)
    path_lengths = tx_to_refl + refl_to_rx
    # Direct distance filter: skip if reflection path > 2x direct
    direct_dist = np.linalg.norm(rx - tx)
    within_range = path_lengths <= direct_dist * 2.0
    if not np.any(within_range):
        return None, np.inf, 0.0
    # All three arrays are filtered with the same mask to stay index-aligned.
    valid_indices = valid_indices[within_range]
    valid_refl = valid_refl[within_range]
    path_lengths = path_lengths[within_range]
    # Keep top candidates by shortest path
    if len(valid_indices) > max_candidates:
        top_idx = np.argpartition(path_lengths, max_candidates)[:max_candidates]
        valid_indices = valid_indices[top_idx]
        valid_refl = valid_refl[top_idx]
        path_lengths = path_lengths[top_idx]
    # Sort by path length for early exit
    sort_order = np.argsort(path_lengths)
    valid_refl = valid_refl[sort_order]
    path_lengths = path_lengths[sort_order]
    # Check LOS only for top N shortest candidates
    check_count = min(len(valid_refl), max_los_checks)
    best_idx = -1
    best_length = np.inf
    for i in range(check_count):
        length = path_lengths[i]
        # Defensive guard; with the break below it never fires on the
        # sorted array — kept for safety.
        if length >= best_length:
            continue
        refl_pt = valid_refl[i]
        # TX -> reflection LOS: any obstacle hit disqualifies this candidate
        intersects1, _ = line_intersects_polygons_batch(
            tx, refl_pt, obstacle_polygons_x, obstacle_polygons_y, obstacle_lengths,
        )
        if np.any(intersects1):
            continue
        # Reflection -> RX LOS
        intersects2, _ = line_intersects_polygons_batch(
            refl_pt, rx, obstacle_polygons_x, obstacle_polygons_y, obstacle_lengths,
        )
        if np.any(intersects2):
            continue
        best_idx = i
        best_length = length
        break  # sorted by length, first valid is best
    if best_idx < 0:
        return None, np.inf, 0.0
    best_point = valid_refl[best_idx]
    # Reflection loss: 3-10 dB depending on path ratio — heuristic, scales
    # linearly with the detour and saturates at path_ratio >= 1.5.
    path_ratio = best_length / max(direct_dist, 1.0)
    reflection_loss = 3.0 + 7.0 * min(1.0, (path_ratio - 1.0) * 2)
    return best_point, best_length, reflection_loss