# open-notebook / .env.railway
# FIX: SurrealDB 2.0 migration syntax and Frontend/CORS link (commit f871fed)
# Railway Deployment Environment Variables
# Copy these to your Railway service's Variables section
# ============================================
# DATABASE CONNECTION (Single Container)
# ============================================
# Use 127.0.0.1 for Railway single-container deployment
SURREAL_URL=ws://127.0.0.1:8000/rpc
SURREAL_USER=root
# SECURITY: root/root is a default for local testing only.
# Change this password before exposing the instance publicly
# (see also OPEN_NOTEBOOK_PASSWORD in the SECURITY section below).
SURREAL_PASSWORD=root
# Placeholder names — rename for production if you prefer (e.g. open_notebook)
SURREAL_NAMESPACE=test
SURREAL_DATABASE=test
# ============================================
# API CONFIGURATION
# ============================================
# INTERNAL_API_URL: Used by Next.js server-side to proxy to FastAPI
INTERNAL_API_URL=http://127.0.0.1:5055
# API_URL: Public URL - SET THIS AFTER FIRST DEPLOY
# Replace YOUR_RAILWAY_APP_URL with your actual Railway app URL
# Format: https://your-app-name.up.railway.app (no /api at the end)
API_URL=https://YOUR_RAILWAY_APP_URL
# ============================================
# WORKER & RETRY CONFIGURATION
# ============================================
# Background worker concurrency (default: 5)
SURREAL_COMMANDS_MAX_TASKS=5
# Retry configuration for resilient background tasks
SURREAL_COMMANDS_RETRY_ENABLED=true
SURREAL_COMMANDS_RETRY_MAX_ATTEMPTS=3
SURREAL_COMMANDS_RETRY_WAIT_STRATEGY=exponential_jitter
SURREAL_COMMANDS_RETRY_WAIT_MIN=1
SURREAL_COMMANDS_RETRY_WAIT_MAX=30
# ============================================
# AI MODEL API KEYS (Configured for FREE tier)
# ============================================
# Groq (for chat, transformations, insights - FREE)
GROQ_API_KEY=your_groq_api_key_here
# Google Gemini (for embeddings, long context - FREE)
GOOGLE_API_KEY=your_google_api_key_here
# Llama (if using via Ollama or another provider)
# If using Ollama locally/remote, set the base URL:
# OLLAMA_API_BASE=http://your-ollama-host:11434
# OpenAI (optional - for GPT models, embeddings, TTS)
# OPENAI_API_KEY=sk-your_openai_key_here
# Anthropic (optional - for Claude models)
# ANTHROPIC_API_KEY=sk-ant-your_anthropic_key_here
# Mistral (optional - for Mistral models)
# MISTRAL_API_KEY=your_mistral_key_here
# DeepSeek (optional - for DeepSeek models)
# DEEPSEEK_API_KEY=your_deepseek_key_here
# XAI (optional - for Grok models)
# XAI_API_KEY=your_xai_key_here
# OpenRouter (optional - access multiple models via one API)
# OPENROUTER_API_KEY=your_openrouter_key_here
# OPENROUTER_BASE_URL=https://openrouter.ai/api/v1
# ============================================
# PODCAST FEATURES (Optional)
# ============================================
# ElevenLabs for high-quality text-to-speech
# ELEVENLABS_API_KEY=your_elevenlabs_key_here
# TTS batch size (adjust based on provider)
# OpenAI/Google: 5, ElevenLabs: 2, Custom: 1
# TTS_BATCH_SIZE=5
# ============================================
# EMBEDDINGS (Optional - if not using default)
# ============================================
# Voyage AI for advanced embeddings
# VOYAGE_API_KEY=your_voyage_key_here
# ============================================
# WEB SCRAPING (Optional)
# ============================================
# Firecrawl for enhanced web scraping
# FIRECRAWL_API_KEY=your_firecrawl_key_here
# Jina AI for web reading and embeddings
# JINA_API_KEY=your_jina_key_here
# ============================================
# SECURITY (Optional but Recommended)
# ============================================
# Protect your instance with a password for public hosting
# OPEN_NOTEBOOK_PASSWORD=your_secure_password_here
# ============================================
# ADVANCED: TIMEOUT CONFIGURATION (Optional)
# ============================================
# Only adjust these if you experience timeout issues
# API client timeout (seconds) - how long frontend waits for responses
# Default: 300 (5 minutes)
# Increase for slow models or large documents
# API_CLIENT_TIMEOUT=300
# LLM provider timeout (seconds) - how long to wait for AI model response
# Default: 60 seconds
# Increase for slow local models (Ollama on CPU, etc.)
# ESPERANTO_LLM_TIMEOUT=60
# ============================================
# NOTES FOR RAILWAY DEPLOYMENT
# ============================================
# 1. PORT variable is automatically set by Railway - DO NOT override it
# 2. Railway will expose your app on the PORT it assigns (usually 8080)
# 3. Set API_URL AFTER your first deploy when you get your Railway domain
# 4. Use 127.0.0.1 (not localhost) for internal connections
# 5. Keep database and API settings as-is for single container deployment