# trigo/trigo-web/.env
# (last change f6a5e63: fix disconnect state sync and dynamic socket URL)
# Unified Environment Configuration for Trigo Web
# This file is used by frontend (Vite), backend (Express), and tools (Node scripts)
#
# LOCAL OVERRIDES:
# Create .env.local (not committed to git) to override any values below
# Example: cp .env.local.example .env.local
#
# Loading order: .env → .env.local (overrides)
#
# See .env.local.example for common override scenarios
# ============================================================================
# Frontend Configuration (Vite - requires VITE_ prefix)
# ============================================================================
# Backend server URL: no VITE_ variable is set here — the socket URL appears
# to be resolved dynamically at runtime (see "dynamic socket URL" fix above).
# NOTE(review): confirm in the frontend code; override in .env.local if a
# fixed URL is ever required.
# Vite Dev Server Configuration
VITE_HOST=0.0.0.0
VITE_PORT=5173
# ONNX Model Paths (relative to /public directory)
# Evaluation mode model - predicts position value
VITE_ONNX_EVALUATION_MODEL=/onnx/20251230-trigo-value-llama-l6-h64-it2_251221-value0.01-pretrain/LlamaCausalLM_ep0036_evaluation.onnx
# Tree mode model - generates move trees
VITE_ONNX_TREE_MODEL=/onnx/20251230-trigo-value-llama-l6-h64-it2_251221-value0.01-pretrain/LlamaCausalLM_ep0036_tree.onnx
# ============================================================================
# Backend Configuration (Express Server)
# ============================================================================
# Server port (HTTP and Socket.io)
PORT=3000
# Frontend URL (used for CORS)
CLIENT_URL=http://localhost:5173
# Environment mode
NODE_ENV=production
# ============================================================================
# Tools Configuration (Node scripts - tools/ directory)
# ============================================================================
# ONNX Model Paths (relative to project root)
# Evaluation mode model - predicts position value
ONNX_EVALUATION_MODEL=./public/onnx/20251204-trigo-value-gpt2-l6-h64-251125-lr500/GPT2CausalLM_ep0019_evaluation.onnx
# Tree mode model - generates move trees
ONNX_TREE_MODEL=./public/onnx/20251204-trigo-value-gpt2-l6-h64-251125-lr500/GPT2CausalLM_ep0019_tree.onnx
# ONNX Runtime Performance Configuration
# See docs/onnx-threading-configuration.md for detailed tuning guide
# Intra-operator parallelism (threads within a single operator)
# Recommended: 4 for most systems, higher for large models
# ONNX_INTRA_OP_NUM_THREADS=4
# (28 below was presumably tuned for a high-core-count host; kept for reference)
# ONNX_INTRA_OP_NUM_THREADS=28
# Inter-operator parallelism (threads across operators)
# Recommended: 1-2 for sequential models, higher for complex graphs
# ONNX_INTER_OP_NUM_THREADS=2
# NOTE(review): 28 contradicts the 1-2 recommendation above, while the
# intra-op key (where 28 matches the "higher for large models" guidance and
# the commented-out value above) is left unset. Was this value meant for
# ONNX_INTRA_OP_NUM_THREADS? Confirm before further tuning.
ONNX_INTER_OP_NUM_THREADS=28
# Graph optimization level
# Options: "disabled", "basic", "extended", "all" (default: "all")
# ONNX_GRAPH_OPTIMIZATION_LEVEL=all
# Memory optimization settings
# ONNX_ENABLE_CPU_MEM_ARENA=true
# ONNX_ENABLE_MEM_PATTERN=true