@mytec: iter3.7.0 start, gpu calc init
This commit is contained in:
70
installer/build-gpu.bat
Normal file
70
installer/build-gpu.bat
Normal file
@@ -0,0 +1,70 @@
|
||||
@echo off
setlocal
REM build-gpu.bat — build the GPU-enabled rfcp-server bundle (PyInstaller ONEDIR).
REM Preconditions: cupy-cuda13x installed, CUDA Toolkit 13.x available (CUDA_PATH).
echo ========================================
echo RFCP GPU Build — ONEDIR mode
echo CuPy-cuda13x + CUDA Toolkit 13.x
echo ========================================
echo.

REM ── [1/5] CuPy must be importable, otherwise the GPU spec is pointless ──
echo [1/5] Checking CuPy installation...
python -c "import cupy; print(f' CuPy {cupy.__version__}')" 2>nul
if errorlevel 1 (
    echo ERROR: CuPy not installed.
    echo Run: pip install cupy-cuda13x
    exit /b 1
)

REM ── [2/5] A trivial kernel must actually run on the GPU ──
echo [2/5] Testing GPU compute...
python -c "import cupy; a = cupy.array([1,2,3]); assert a.sum() == 6; print(' GPU compute: OK')" 2>nul
if errorlevel 1 (
    echo ERROR: CuPy installed but GPU compute failed.
    echo Check: CUDA Toolkit installed? nvidia-smi works?
    exit /b 1
)

REM ── [3/5] The spec file scans %%CUDA_PATH%%\bin for runtime DLLs ──
echo [3/5] Checking CUDA Toolkit...
if defined CUDA_PATH (
    echo CUDA_PATH: %CUDA_PATH%
) else (
    echo WARNING: CUDA_PATH not set
)

REM ── [4/5] nvidia pip wheels are optional; CUDA Toolkit DLLs are the fallback ──
echo [4/5] Checking nvidia pip packages...
python -c "import nvidia; import os; base=os.path.dirname(nvidia.__file__); dlls=[f for d in os.listdir(base) if os.path.isdir(os.path.join(base,d,'bin')) for f in os.listdir(os.path.join(base,d,'bin')) if f.endswith('.dll')]; print(f' nvidia pip DLLs: {len(dlls)}')" 2>nul
if errorlevel 1 (
    REM Closing parens inside a ( ) block must be escaped or cmd ends the block here.
    echo No nvidia pip packages ^(will use CUDA Toolkit^)
)

REM ── [5/5] Build ──
echo.
echo [5/5] Building rfcp-server (ONEDIR mode)...
echo This may take 3-5 minutes...
echo.

REM %~dp0 already ends with a backslash, so no extra "\" before "..".
cd /d "%~dp0..\backend"
pyinstaller "..\installer\rfcp-server-gpu.spec" --clean --noconfirm

echo.
echo ========================================
if exist "dist\rfcp-server\rfcp-server.exe" (
    echo BUILD COMPLETE! ^(ONEDIR mode^)
    echo.
    echo Output: backend\dist\rfcp-server\
    REM Report the bundle size check instead of discarding the count to nul.
    for /f %%N in ('dir /b dist\rfcp-server\*.exe dist\rfcp-server\*.dll 2^>nul ^| find /c /v ""') do echo Bundled exe/dll files: %%N
    echo.
    echo Test commands:
    echo cd dist\rfcp-server
    echo rfcp-server.exe
    echo curl http://localhost:8090/api/health
    echo curl http://localhost:8090/api/gpu/status
    echo ========================================
) else (
    echo BUILD FAILED — check errors above
    echo ========================================
    exit /b 1
)

pause
|
||||
84
installer/build-gpu.sh
Normal file
84
installer/build-gpu.sh
Normal file
@@ -0,0 +1,84 @@
|
||||
#!/bin/bash
# build-gpu.sh — build the GPU-enabled rfcp-server bundle (PyInstaller ONEDIR).
# Preflight: CuPy import, GPU compute smoke test, CUDA env, nvidia pip wheels.
# -u added: unset-variable typos fail loudly; CUDA_PATH is guarded below.
set -eu

echo "========================================"
echo " RFCP GPU Build — ONEDIR mode"
echo " CuPy-cuda13x + CUDA Toolkit 13.x"
echo "========================================"
echo ""

SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
BACKEND_DIR="$SCRIPT_DIR/../backend"

# The backend entry point must exist before we bother with any checks.
if [ ! -f "$BACKEND_DIR/run_server.py" ]; then
  echo "ERROR: Backend not found at $BACKEND_DIR"
  exit 1
fi

# Check Python
echo "[1/5] Checking Python..."
python3 --version || { echo "ERROR: Python3 not found"; exit 1; }

# Check CuPy — hard requirement for a GPU build.
echo ""
echo "[2/5] Checking CuPy installation..."
if ! python3 -c "import cupy; print(f' CuPy {cupy.__version__}')" 2>/dev/null; then
  echo "ERROR: CuPy not installed"
  echo ""
  echo "Install CuPy:"
  echo " pip3 install cupy-cuda13x"
  echo " # or for WSL2:"
  echo " pip3 install cupy-cuda13x --break-system-packages"
  exit 1
fi

# Check GPU compute — non-fatal: a machine without a GPU can still build.
echo ""
echo "[3/5] Testing GPU compute..."
if ! python3 -c "import cupy; a = cupy.array([1,2,3]); assert a.sum() == 6; print(' GPU compute: OK')" 2>/dev/null; then
  echo "WARNING: GPU compute test failed (may still work)"
fi

# Check CUDA — ${CUDA_PATH:-} keeps set -u happy when the var is unset.
echo ""
echo "[4/5] Checking CUDA..."
if [ -n "${CUDA_PATH:-}" ]; then
  echo " CUDA_PATH: $CUDA_PATH"
else
  echo " CUDA_PATH not set (relying on nvidia pip packages)"
fi

# Check nvidia pip packages (informational only)
echo ""
echo "[5/5] Checking nvidia pip packages..."
python3 -c "import nvidia; print(' nvidia packages found')" 2>/dev/null || echo " No nvidia pip packages"

# Fail early with a clear message instead of bash's bare "command not found".
command -v pyinstaller >/dev/null 2>&1 || { echo "ERROR: pyinstaller not found (pip3 install pyinstaller)"; exit 1; }

# Build
echo ""
echo "Building rfcp-server (ONEDIR mode)..."
echo ""

cd "$BACKEND_DIR"
pyinstaller "$SCRIPT_DIR/rfcp-server-gpu.spec" --clean --noconfirm

echo ""
echo "========================================"
if [ -f "dist/rfcp-server/rfcp-server" ] || [ -f "dist/rfcp-server/rfcp-server.exe" ]; then
  echo " BUILD COMPLETE! (ONEDIR mode)"
  echo ""
  echo " Output: backend/dist/rfcp-server/"
  ls -lh dist/rfcp-server/ | head -20
  echo ""
  echo " Test:"
  echo " cd dist/rfcp-server"
  echo " ./rfcp-server"
  echo " curl http://localhost:8090/api/health"
  echo "========================================"
else
  echo " BUILD FAILED — check errors above"
  echo "========================================"
  exit 1
fi
|
||||
@@ -3,6 +3,7 @@ set -e
|
||||
|
||||
echo "========================================="
|
||||
echo " RFCP Desktop Build (Windows)"
|
||||
echo " GPU-enabled ONEDIR build"
|
||||
echo "========================================="
|
||||
|
||||
cd "$(dirname "$0")/.."
|
||||
@@ -14,15 +15,30 @@ npm ci
|
||||
npm run build
|
||||
cd ..
|
||||
|
||||
# 2. Build backend with PyInstaller
|
||||
echo "[2/4] Building backend..."
|
||||
# 2. Build backend with PyInstaller (GPU ONEDIR mode)
|
||||
echo "[2/4] Building backend (GPU)..."
|
||||
cd backend
|
||||
|
||||
# Check CuPy is available
|
||||
if ! python -c "import cupy" 2>/dev/null; then
|
||||
echo "WARNING: CuPy not installed - GPU acceleration will not be available"
|
||||
echo " Install with: pip install cupy-cuda13x"
|
||||
fi
|
||||
|
||||
python -m pip install -r requirements.txt
|
||||
python -m pip install pyinstaller
|
||||
cd ../installer
|
||||
python -m PyInstaller rfcp-server.spec --clean --noconfirm
|
||||
|
||||
# Build using GPU spec (ONEDIR output)
|
||||
python -m PyInstaller ../installer/rfcp-server-gpu.spec --clean --noconfirm
|
||||
|
||||
# Copy ONEDIR folder to desktop staging area
|
||||
# Result: desktop/backend-dist/win/rfcp-server/rfcp-server.exe + _internal/
|
||||
mkdir -p ../desktop/backend-dist/win
|
||||
cp dist/rfcp-server.exe ../desktop/backend-dist/win/
|
||||
rm -rf ../desktop/backend-dist/win/rfcp-server # Clean old build
|
||||
cp -r dist/rfcp-server ../desktop/backend-dist/win/rfcp-server
|
||||
|
||||
echo " Backend copied to: desktop/backend-dist/win/rfcp-server/"
|
||||
ls -la ../desktop/backend-dist/win/rfcp-server/*.exe 2>/dev/null || true
|
||||
cd ..
|
||||
|
||||
# 3. Build Electron app
|
||||
|
||||
305
installer/rfcp-server-gpu.spec
Normal file
305
installer/rfcp-server-gpu.spec
Normal file
@@ -0,0 +1,305 @@
|
||||
# rfcp-server-gpu.spec — GPU-enabled build (CuPy + CUDA 13.x)
# RFCP Iteration 3.6.0
# NOTE(review): commit message says iter3.7.0 while this header says 3.6.0 — confirm.
#
# Mode: ONEDIR (directory output, not single exe)
# This is better for CUDA — DLLs load directly without temp extraction
#
# Requirements:
#   pip install cupy-cuda13x fastrlock pyinstaller
#   CUDA Toolkit 13.x installed (winget install Nvidia.CUDA)
#
# Build:
#   cd backend && pyinstaller ../installer/rfcp-server-gpu.spec --clean --noconfirm
#
# Output:
#   backend/dist/rfcp-server/rfcp-server.exe (+ DLLs in same folder)

import os
import sys
import glob
# NOTE(review): collect_dynamic_libs is imported but never used in this spec —
# presumably kept for experiments; confirm before removing.
from PyInstaller.utils.hooks import collect_all, collect_dynamic_libs

# SPEC is a global injected by PyInstaller: the absolute path of this .spec
# file. The backend lives one level up, beside installer/.
backend_path = os.path.abspath(os.path.join(os.path.dirname(SPEC), '..', 'backend'))
print(f"[GPU SPEC] Backend path: {backend_path}")
|
||||
|
||||
# ═══════════════════════════════════════════
# Collect CuPy packages
# ═══════════════════════════════════════════
# Empty defaults keep the spec loadable even when CuPy is absent; the
# build then simply ships without the CuPy payload.
cupy_datas, cupy_binaries, cupy_hiddenimports = [], [], []
cupyb_datas, cupyb_binaries, cupyb_hiddenimports = [], [], []

try:
    cupy_datas, cupy_binaries, cupy_hiddenimports = collect_all('cupy')
    cupyb_datas, cupyb_binaries, cupyb_hiddenimports = collect_all('cupy_backends')
    print(f"[GPU SPEC] CuPy: {len(cupy_binaries)} binaries, {len(cupy_datas)} data files")
except Exception as e:
    print(f"[GPU SPEC] WARNING: CuPy collection failed: {e}")

# NOTE: nvidia pip packages REMOVED - they have cuda12 DLLs that conflict with cupy-cuda13x
# We use CUDA Toolkit 13.x DLLs only
|
||||
# ═══════════════════════════════════════════
# Collect CUDA Toolkit DLLs (system install)
# ═══════════════════════════════════════════
# Installed via: winget install Nvidia.CUDA
cuda_toolkit_binaries = []
cuda_path = os.environ.get('CUDA_PATH', '')

if cuda_path:
    # Scan BOTH bin\ and bin\x64\ directories
    cuda_bin_dirs = [
        os.path.join(cuda_path, 'bin'),
        os.path.join(cuda_path, 'bin', 'x64'),
    ]

    # Only essential CUDA runtime DLLs (exclude NPP, nvjpeg, nvblas, nvfatbin)
    cuda_dll_patterns = [
        'cublas64_*.dll',
        'cublasLt64_*.dll',
        'cudart64_*.dll',
        'cufft64_*.dll',
        'cufftw64_*.dll',
        'curand64_*.dll',
        'cusolver64_*.dll',
        'cusolverMg64_*.dll',
        'cusparse64_*.dll',
        'nvrtc64_*.dll',
        'nvrtc-builtins64_*.dll',
        'nvJitLink_*.dll',
        'nvjitlink_*.dll',
    ]

    # Track basenames so a DLL present in both bin\ and bin\x64\ ships once.
    collected_dlls = set()
    for bin_dir in cuda_bin_dirs:
        if not os.path.isdir(bin_dir):
            continue
        for pattern in cuda_dll_patterns:
            for dll_path in glob.glob(os.path.join(bin_dir, pattern)):
                dll_name = os.path.basename(dll_path)
                if dll_name in collected_dlls:
                    continue
                collected_dlls.add(dll_name)
                # '.' destination: drop the DLL beside the executable.
                cuda_toolkit_binaries.append((dll_path, '.'))
        print(f"[GPU SPEC] Scanned: {bin_dir}")

    print(f"[GPU SPEC] CUDA Toolkit ({cuda_path}): {len(cuda_toolkit_binaries)} DLLs")
    for dll, _ in cuda_toolkit_binaries:
        print(f"[GPU SPEC] {os.path.basename(dll)}")
else:
    print("[GPU SPEC] ERROR: CUDA_PATH not set!")
    print("[GPU SPEC] Install: winget install Nvidia.CUDA")

# All GPU binaries (CUDA Toolkit only, no nvidia pip packages)
all_gpu_binaries = cuda_toolkit_binaries

if not all_gpu_binaries:
    print("[GPU SPEC] ⚠ NO CUDA DLLs FOUND!")
    print("[GPU SPEC] Install CUDA Toolkit: winget install Nvidia.CUDA")
else:
    print(f"[GPU SPEC] ✅ Total GPU DLLs: {len(all_gpu_binaries)}")
|
||||
|
||||
# ═══════════════════════════════════════════
# Collect fastrlock (CuPy dependency)
# ═══════════════════════════════════════════
# Defaults stay empty when fastrlock is absent — it is optional.
fl_datas, fl_binaries, fl_hiddenimports = [], [], []

try:
    fl_datas, fl_binaries, fl_hiddenimports = collect_all('fastrlock')
    print(f"[GPU SPEC] fastrlock: {len(fl_binaries)} binaries")
except Exception:
    print("[GPU SPEC] fastrlock not found (optional)")
|
||||
|
||||
# ═══════════════════════════════════════════
# PyInstaller Analysis
# ═══════════════════════════════════════════
# Walks the import graph from run_server.py and gathers pure-Python modules,
# extension modules/DLLs, and data files. The cupy_*/cupyb_*/fl_*/all_gpu_*
# names are populated by the collection sections earlier in this spec.

a = Analysis(
    [os.path.join(backend_path, 'run_server.py')],
    pathex=[backend_path],
    # Native payload: CuPy wheel binaries + fastrlock + CUDA Toolkit DLLs.
    binaries=(
        cupy_binaries + cupyb_binaries +
        fl_binaries + all_gpu_binaries
    ),
    datas=[
        # Include app/ source code
        (os.path.join(backend_path, 'app'), 'app'),
    ] + cupy_datas + cupyb_datas + fl_datas,
    # Modules imported dynamically at runtime, invisible to static analysis.
    hiddenimports=[
        # ── Uvicorn internals ──
        'uvicorn.logging',
        'uvicorn.loops',
        'uvicorn.loops.auto',
        'uvicorn.loops.asyncio',
        'uvicorn.protocols',
        'uvicorn.protocols.http',
        'uvicorn.protocols.http.auto',
        'uvicorn.protocols.http.h11_impl',
        'uvicorn.protocols.http.httptools_impl',
        'uvicorn.protocols.websockets',
        'uvicorn.protocols.websockets.auto',
        'uvicorn.protocols.websockets.wsproto_impl',
        'uvicorn.lifespan',
        'uvicorn.lifespan.on',
        'uvicorn.lifespan.off',
        # ── FastAPI / Starlette ──
        'fastapi',
        'fastapi.middleware',
        'fastapi.middleware.cors',
        'fastapi.routing',
        'fastapi.responses',
        'fastapi.exceptions',
        'starlette',
        'starlette.routing',
        'starlette.middleware',
        'starlette.middleware.cors',
        'starlette.responses',
        'starlette.requests',
        'starlette.concurrency',
        'starlette.formparsers',
        'starlette.staticfiles',
        # ── Pydantic ──
        'pydantic',
        'pydantic.fields',
        'pydantic_settings',
        'pydantic_core',
        # ── HTTP / networking ──
        'httpx',
        'httpcore',
        'h11',
        'httptools',
        'anyio',
        'anyio._backends',
        'anyio._backends._asyncio',
        'sniffio',
        # ── MongoDB (motor/pymongo) ──
        'motor',
        'motor.motor_asyncio',
        'pymongo',
        'pymongo.errors',
        'pymongo.collection',
        'pymongo.database',
        'pymongo.mongo_client',
        # ── Async I/O ──
        'aiofiles',
        'aiofiles.os',
        'aiofiles.ospath',
        # ── Scientific ──
        'numpy',
        'numpy.core',
        'scipy',
        'scipy.special',
        'scipy.interpolate',
        'shapely',
        'shapely.geometry',
        'shapely.ops',
        # ── Multipart ──
        # NOTE(review): both spellings listed — 'multipart' and
        # 'python_multipart'; presumably covers old/new package names. Confirm.
        'multipart',
        'python_multipart',
        # ── Encoding ──
        'email.mime',
        'email.mime.multipart',
        # ── Multiprocessing ──
        'multiprocessing',
        'multiprocessing.pool',
        'multiprocessing.queues',
        'concurrent.futures',
        # ── CuPy + CUDA ──
        'cupy',
        'cupy.cuda',
        'cupy.cuda.runtime',
        'cupy.cuda.driver',
        'cupy.cuda.memory',
        'cupy.cuda.stream',
        'cupy.cuda.device',
        'cupy._core',
        'cupy._core.core',
        'cupy._core._routines_math',
        'cupy._core._routines_logic',
        'cupy._core._routines_manipulation',
        'cupy._core._routines_sorting',
        'cupy._core._routines_statistics',
        'cupy._core._cub_reduction',
        'cupy.fft',
        'cupy.linalg',
        'cupy.random',
        'cupy_backends',
        'cupy_backends.cuda',
        'cupy_backends.cuda.api',
        'cupy_backends.cuda.libs',
        'fastrlock',
        'fastrlock.rlock',
    ] + cupy_hiddenimports + cupyb_hiddenimports + fl_hiddenimports,
    hookspath=[],
    hooksconfig={},
    # Runs before any application import so CUDA DLL dirs are registered
    # ahead of the first CuPy import (see rthook_cuda_dlls.py).
    runtime_hooks=[os.path.join(os.path.dirname(SPEC), 'rthook_cuda_dlls.py')],
    # ── Exclude bloat ──
    excludes=[
        # GUI
        'tkinter',
        'matplotlib',
        'PIL',
        'IPython',
        # Data science bloat
        'pandas',
        'tensorflow',
        'torch',
        'keras',
        # Testing
        'pytest',

        # Jupyter
        'jupyter',
        'notebook',
        'ipykernel',
        # gRPC / telemetry (often pulled in by dependencies)
        'grpc',
        'grpcio',
        'google.protobuf',
        'opentelemetry',
        'opentelemetry.sdk',
        'opentelemetry.instrumentation',
        # Ray (too heavy, we use multiprocessing)
        'ray',
        # Other
        'cv2',
        'sklearn',
        'sympy',
    ],
    noarchive=False,
)
|
||||
|
||||
pyz = PYZ(a.pure)

# ═══════════════════════════════════════════
# ONEDIR mode: EXE + COLLECT
# ═══════════════════════════════════════════
# Produces dist/rfcp-server/rfcp-server.exe with every DLL beside it, so
# CUDA libraries load in place instead of being unpacked to a temp dir.

# Use the icon sitting next to this spec when present, otherwise none.
_icon_file = os.path.join(os.path.dirname(SPEC), 'rfcp.ico')

exe = EXE(
    pyz,
    a.scripts,
    [],                      # binaries/datas live in COLLECT, not in the exe
    exclude_binaries=True,   # ONEDIR: keep the executable itself thin
    name='rfcp-server',
    debug=False,
    bootloader_ignore_signals=False,
    strip=False,
    upx=False,               # no compression — CUDA libs need fast loading
    console=True,
    icon=_icon_file if os.path.exists(_icon_file) else None,
)

coll = COLLECT(
    exe,
    a.binaries,
    a.zipfiles,
    a.datas,
    strip=False,
    upx=False,
    upx_exclude=[],
    name='rfcp-server',
)
|
||||
BIN
installer/rfcp.ico
Normal file
BIN
installer/rfcp.ico
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 116 KiB |
24
installer/rthook_cuda_dlls.py
Normal file
24
installer/rthook_cuda_dlls.py
Normal file
@@ -0,0 +1,24 @@
|
||||
# PyInstaller runtime hook: register CUDA DLL search paths.
# Executed before any application import — in particular before CuPy.
#
# Rationale: on Windows, Python 3.8+ only honours os.add_dll_directory()
# for native DLL resolution, and PyInstaller's ONEDIR layout keeps the
# bundled DLLs in _internal/, which is not on the default search path.

import os
import sys

if sys.platform == 'win32' and getattr(sys, 'frozen', False):
    # In ONEDIR builds sys._MEIPASS is the bundle's _internal/ directory.
    bundle_dir = getattr(sys, '_MEIPASS', None)
    if bundle_dir and os.path.isdir(bundle_dir):
        os.add_dll_directory(bundle_dir)
        print(f"[CUDA DLL Hook] Added DLL directory: {bundle_dir}")

    # Fall back to a system CUDA Toolkit install when CUDA_PATH is set.
    toolkit_root = os.environ.get('CUDA_PATH', '')
    if toolkit_root:
        for rel in ('bin', os.path.join('bin', 'x64')):
            candidate = os.path.join(toolkit_root, rel)
            if os.path.isdir(candidate):
                os.add_dll_directory(candidate)
                print(f"[CUDA DLL Hook] Added CUDA_PATH: {candidate}")
|
||||
Reference in New Issue
Block a user