{ "cells": [ { "cell_type": "markdown", "id": "fb1f1fdc", "metadata": { "papermill": { "duration": 0.002985, "end_time": "2026-01-10T18:17:32.170524", "exception": false, "start_time": "2026-01-10T18:17:32.167539", "status": "completed" }, "tags": [], "id": "fb1f1fdc" }, "source": [ "# **biplet-dino-colmap-2dgs**" ] }, { "cell_type": "markdown", "source": [ "# 新しいセクション" ], "metadata": { "id": "jK0ja9PfddVA" }, "id": "jK0ja9PfddVA" }, { "cell_type": "code", "source": [ "#サイズの異なる画像を扱う\n", "from google.colab import drive\n", "drive.mount('/content/drive')" ], "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "JON4rYSEOzCg", "outputId": "458cec38-282c-48a0-a836-832559e5acf1" }, "id": "JON4rYSEOzCg", "execution_count": 32, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ "Drive already mounted at /content/drive; to attempt to forcibly remount, call drive.mount(\"/content/drive\", force_remount=True).\n" ] } ] }, { "cell_type": "code", "execution_count": 33, "id": "22353010", "metadata": { "execution": { "iopub.execute_input": "2026-01-10T18:17:32.181455Z", "iopub.status.busy": "2026-01-10T18:17:32.180969Z", "iopub.status.idle": "2026-01-10T18:17:32.355942Z", "shell.execute_reply": "2026-01-10T18:17:32.355229Z" }, "papermill": { "duration": 0.179454, "end_time": "2026-01-10T18:17:32.357275", "exception": false, "start_time": "2026-01-10T18:17:32.177821", "status": "completed" }, "tags": [], "id": "22353010" }, "outputs": [], "source": [ "import os\n", "import sys\n", "import subprocess\n", "import shutil\n", "from pathlib import Path\n", "import cv2\n", "from PIL import Image\n", "import glob\n", "\n", "IMAGE_PATH=\"/content/drive/MyDrive/your_folder/fountain100\"\n", "\n", "#WORK_DIR = '/content/gaussian-splatting'\n", "WORK_DIR = \"/content/2d-gaussian-splatting\"\n", "\n", "OUTPUT_DIR = '/content/output'\n", "COLMAP_DIR = '/content/colmap_data'" ] }, { "cell_type": "code", "execution_count": 34, "id": "be6df249", "metadata": { 
"execution": { "iopub.execute_input": "2026-01-10T18:17:32.363444Z", "iopub.status.busy": "2026-01-10T18:17:32.363175Z", "iopub.status.idle": "2026-01-10T18:22:43.720241Z", "shell.execute_reply": "2026-01-10T18:22:43.719380Z" }, "papermill": { "duration": 311.361656, "end_time": "2026-01-10T18:22:43.721610", "exception": false, "start_time": "2026-01-10T18:17:32.359954", "status": "completed" }, "tags": [], "id": "be6df249", "outputId": "4d17052f-2c01-4f3e-ebd6-cb864bc264a5", "colab": { "base_uri": "https://localhost:8080/" } }, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ "🚀 Setting up COLAB environment (v8 - Python 3.12 compatible)\n", "\n", "======================================================================\n", "STEP 0: Fix NumPy (Python 3.12 compatible)\n", "======================================================================\n", "Running: /usr/bin/python3 -m pip uninstall -y numpy\n", "Running: /usr/bin/python3 -m pip install numpy==1.26.4\n", "Running: /usr/bin/python3 -c import numpy; print('NumPy:', numpy.__version__)\n", "\n", "======================================================================\n", "STEP 1: System packages\n", "======================================================================\n", "Running: apt-get update -qq\n", "Running: apt-get install -y -qq colmap build-essential cmake git libopenblas-dev xvfb\n", "\n", "======================================================================\n", "STEP 2: Clone Gaussian Splatting\n", "======================================================================\n", "✓ Repository already exists\n", "\n", "======================================================================\n", "STEP 3: Python packages (VERBOSE MODE)\n", "======================================================================\n", "\n", "📦 Installing PyTorch...\n", "Running: /usr/bin/python3 -m pip install torch torchvision torchaudio\n", "\n", "📦 Installing core utilities...\n", "Running: /usr/bin/python3 -m 
pip install opencv-python pillow imageio imageio-ffmpeg plyfile tqdm tensorboard\n", "\n", "📦 Installing transformers (NumPy 1.26 compatible)...\n", "Running: /usr/bin/python3 -m pip install transformers==4.40.0\n", "\n", "📦 Installing LightGlue stack...\n", "Running: /usr/bin/python3 -m pip install kornia\n", "Running: /usr/bin/python3 -m pip install h5py\n", "Running: /usr/bin/python3 -m pip install matplotlib\n", "Running: /usr/bin/python3 -m pip install pycolmap\n", "\n", "======================================================================\n", "STEP 4: Detailed Verification\n", "======================================================================\n", "\n", "🔍 Testing NumPy...\n", " ✓ NumPy: 2.0.2\n", "\n", "🔍 Testing PyTorch...\n", " ✓ PyTorch: 2.9.0+cu128\n", " ✓ CUDA available: True\n", " ✓ CUDA version: 12.8\n", "\n", "🔍 Testing transformers...\n", " ✓ transformers version: 4.40.0\n", " ✓ AutoModel import: OK\n", "\n", "🔍 Testing pycolmap...\n", " ✓ pycolmap: OK\n", "\n", "🔍 Testing kornia...\n", " ✓ kornia: 0.8.2\n" ] } ], "source": [ "def run_cmd(cmd, check=True, capture=False, cwd=None): # ← cwd=None を追加\n", " \"\"\"Run command with better error handling\"\"\"\n", " print(f\"Running: {' '.join(cmd)}\")\n", " result = subprocess.run(\n", " cmd,\n", " capture_output=capture,\n", " text=True,\n", " check=False,\n", " cwd=cwd # ← ここに渡す\n", " )\n", " if check and result.returncode != 0:\n", " print(f\"❌ Command failed with code {result.returncode}\")\n", " if capture:\n", " print(f\"STDOUT: {result.stdout}\")\n", " print(f\"STDERR: {result.stderr}\")\n", " return result\n", "\n", "\n", "def setup_environment():\n", " \"\"\"\n", " Colab environment setup for Gaussian Splatting + LightGlue + pycolmap\n", " Python 3.12 compatible version (v8)\n", " \"\"\"\n", "\n", " print(\"🚀 Setting up COLAB environment (v8 - Python 3.12 compatible)\")\n", "\n", " WORK_DIR = \"2d-gaussian-splatting\"\n", "\n", " # 
=====================================================================\n", " # STEP 0: NumPy FIX (Python 3.12 compatible)\n", " # =====================================================================\n", " print(\"\\n\" + \"=\"*70)\n", " print(\"STEP 0: Fix NumPy (Python 3.12 compatible)\")\n", " print(\"=\"*70)\n", "\n", " # Python 3.12 requires numpy >= 1.26\n", " run_cmd([sys.executable, \"-m\", \"pip\", \"uninstall\", \"-y\", \"numpy\"])\n", " run_cmd([sys.executable, \"-m\", \"pip\", \"install\", \"numpy==1.26.4\"])\n", "\n", " # sanity check\n", " run_cmd([sys.executable, \"-c\", \"import numpy; print('NumPy:', numpy.__version__)\"])\n", "\n", " # =====================================================================\n", " # STEP 1: System packages (Colab)\n", " # =====================================================================\n", " print(\"\\n\" + \"=\"*70)\n", " print(\"STEP 1: System packages\")\n", " print(\"=\"*70)\n", "\n", " run_cmd([\"apt-get\", \"update\", \"-qq\"])\n", " run_cmd([\n", " \"apt-get\", \"install\", \"-y\", \"-qq\",\n", " \"colmap\",\n", " \"build-essential\",\n", " \"cmake\",\n", " \"git\",\n", " \"libopenblas-dev\",\n", " \"xvfb\"\n", " ])\n", "\n", " # virtual display (COLMAP / OpenCV safety)\n", " os.environ[\"QT_QPA_PLATFORM\"] = \"offscreen\"\n", " os.environ[\"DISPLAY\"] = \":99\"\n", " subprocess.Popen(\n", " [\"Xvfb\", \":99\", \"-screen\", \"0\", \"1024x768x24\"],\n", " stdout=subprocess.DEVNULL,\n", " stderr=subprocess.DEVNULL\n", " )\n", "\n", " # =====================================================================\n", " # STEP 2: Clone 2D Gaussian Splatting\n", " # =====================================================================\n", " print(\"\\n\" + \"=\"*70)\n", " print(\"STEP 2: Clone Gaussian Splatting\")\n", " print(\"=\"*70)\n", "\n", " if not os.path.exists(WORK_DIR):\n", " run_cmd([\n", " \"git\", \"clone\", \"--recursive\",\n", " \"https://github.com/hbb1/2d-gaussian-splatting.git\",\n", " WORK_DIR\n", " 
])\n", " else:\n", " print(\"✓ Repository already exists\")\n", "\n", " # =====================================================================\n", " # STEP 3: Python packages (FIXED ORDER & VERSIONS)\n", " # =====================================================================\n", " print(\"\\n\" + \"=\"*70)\n", " print(\"STEP 3: Python packages (VERBOSE MODE)\")\n", " print(\"=\"*70)\n", "\n", " # ---- PyTorch (Colab CUDA対応) ----\n", " print(\"\\n📦 Installing PyTorch...\")\n", " run_cmd([\n", " sys.executable, \"-m\", \"pip\", \"install\",\n", " \"torch\", \"torchvision\", \"torchaudio\"\n", " ])\n", "\n", " # ---- Core utils ----\n", " print(\"\\n📦 Installing core utilities...\")\n", " run_cmd([\n", " sys.executable, \"-m\", \"pip\", \"install\",\n", " \"opencv-python\",\n", " \"pillow\",\n", " \"imageio\",\n", " \"imageio-ffmpeg\",\n", " \"plyfile\",\n", " \"tqdm\",\n", " \"tensorboard\"\n", " ])\n", "\n", " # ---- transformers (NumPy 1.26 compatible) ----\n", " print(\"\\n📦 Installing transformers (NumPy 1.26 compatible)...\")\n", " # Install transformers with proper dependencies\n", " run_cmd([\n", " sys.executable, \"-m\", \"pip\", \"install\",\n", " \"transformers==4.40.0\"\n", " ])\n", "\n", " # ---- LightGlue stack (GITHUB INSTALL) ----\n", " print(\"\\n📦 Installing LightGlue stack...\")\n", "\n", " # Install kornia first\n", " run_cmd([sys.executable, \"-m\", \"pip\", \"install\", \"kornia\"])\n", "\n", " # Install h5py (sometimes needed)\n", " run_cmd([sys.executable, \"-m\", \"pip\", \"install\", \"h5py\"])\n", "\n", " # Install matplotlib (LightGlue dependency)\n", " run_cmd([sys.executable, \"-m\", \"pip\", \"install\", \"matplotlib\"])\n", "\n", " # Install pycolmap\n", " run_cmd([sys.executable, \"-m\", \"pip\", \"install\", \"pycolmap\"])\n", "\n", "\n", "\n", " # =====================================================================\n", " # STEP 4: Detailed Verification\n", " # 
=====================================================================\n", " print(\"\\n\" + \"=\"*70)\n", " print(\"STEP 4: Detailed Verification\")\n", " print(\"=\"*70)\n", "\n", " # NumPy (verify version first)\n", " print(\"\\n🔍 Testing NumPy...\")\n", " try:\n", " import numpy as np\n", " print(f\" ✓ NumPy: {np.__version__}\")\n", " except Exception as e:\n", " print(f\" ❌ NumPy failed: {e}\")\n", "\n", " # PyTorch\n", " print(\"\\n🔍 Testing PyTorch...\")\n", " try:\n", " import torch\n", " print(f\" ✓ PyTorch: {torch.__version__}\")\n", " print(f\" ✓ CUDA available: {torch.cuda.is_available()}\")\n", " if torch.cuda.is_available():\n", " print(f\" ✓ CUDA version: {torch.version.cuda}\")\n", " except Exception as e:\n", " print(f\" ❌ PyTorch failed: {e}\")\n", "\n", " # transformers\n", " print(\"\\n🔍 Testing transformers...\")\n", " try:\n", " import transformers\n", " print(f\" ✓ transformers version: {transformers.__version__}\")\n", " from transformers import AutoModel\n", " print(f\" ✓ AutoModel import: OK\")\n", " except Exception as e:\n", " print(f\" ❌ transformers failed: {e}\")\n", " print(f\" Attempting detailed diagnosis...\")\n", " result = run_cmd([\n", " sys.executable, \"-c\",\n", " \"import transformers; print(transformers.__version__)\"\n", " ], capture=True)\n", " print(f\" Output: {result.stdout}\")\n", " print(f\" Error: {result.stderr}\")\n", "\n", " # pycolmap\n", " print(\"\\n🔍 Testing pycolmap...\")\n", " try:\n", " import pycolmap\n", " print(f\" ✓ pycolmap: OK\")\n", " except Exception as e:\n", " print(f\" ❌ pycolmap failed: {e}\")\n", "\n", " # kornia\n", " print(\"\\n🔍 Testing kornia...\")\n", " try:\n", " import kornia\n", " print(f\" ✓ kornia: {kornia.__version__}\")\n", " except Exception as e:\n", " print(f\" ❌ kornia failed: {e}\")\n", "\n", " return WORK_DIR\n", "\n", "\n", "if __name__ == \"__main__\":\n", " setup_environment()" ] }, { "cell_type": "code", "source": [], "metadata": { "id": "3UEcAPBILz6Z" }, "id": 
"3UEcAPBILz6Z", "execution_count": 34, "outputs": [] }, { "cell_type": "code", "source": [ "# =====================================================================\n", "# STEP 4: Build 2D GS submodules (確実な方法)\n", "# =====================================================================\n", "print(\"\\n\" + \"=\"*70)\n", "print(\"STEP 5: Build Gaussian Splatting submodules\")\n", "print(\"=\"*70)\n", "\n", "# diff-surfel-rasterization\n", "\n", "path = os.path.join(WORK_DIR, \"submodules\", \"diff-surfel-rasterization\")\n", "url = \"https://github.com/hbb1/diff-surfel-rasterization.git\"\n", "name = os.path.basename(path)\n", "print(f\"\\n📦 Processing {name}...\")\n", "if not os.path.exists(path):\n", " print(f\" > Cloning {url}...\")\n", " # 親ディレクトリが存在することを確認\n", " os.makedirs(os.path.dirname(path), exist_ok=True)\n", " run_cmd([\"git\", \"clone\", url, path])\n", "else:\n", " print(f\" ✓ {name} already exists.\")\n", "# 2. setup.py install (コンパイル)\n", "print(f\" > Compiling and Installing {name}...\")\n", "result = run_cmd(\n", " [sys.executable, \"setup.py\", \"install\"],\n", " cwd=path,\n", " check=False, # エラーでも止めない\n", " capture=True\n", ")\n", "if result.returncode != 0:\n", " print(f\"❌ Failed to build {name}\")\n", " print(\"--- STDERR ---\")\n", " print(result.stderr)\n", "else:\n", " print(f\"✅ Successfully built {name}\")" ], "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "kLdJ-FeT-kQc", "outputId": "1366deca-2c20-49f1-a540-6528b2827efd" }, "id": "kLdJ-FeT-kQc", "execution_count": 35, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ "\n", "======================================================================\n", "STEP 5: Build Gaussian Splatting submodules\n", "======================================================================\n", "\n", "📦 Processing diff-surfel-rasterization...\n", " ✓ diff-surfel-rasterization already exists.\n", " > Compiling and Installing diff-surfel-rasterization...\n", "Running: 
/usr/bin/python3 setup.py install\n", "✅ Successfully built diff-surfel-rasterization\n" ] } ] }, { "cell_type": "code", "source": [ "import os\n", "import sys\n", "import shutil\n", "import subprocess\n", "\n", "# --- 前準備: 環境の整備 ---\n", "print(\"Configuring build environment...\")\n", "# 1. CUDAコンパイラの確認\n", "!nvcc --version\n", "\n", "# 2. 必須ツールのインストール (ninjaはビルドを安定・高速化させます)\n", "!pip install setuptools wheel ninja\n", "\n", "# 3. 環境変数のセットアップ (CUDAのパスを明示的に指定)\n", "os.environ[\"CUDA_HOME\"] = \"/usr/local/cuda\"\n", "os.environ[\"PATH\"] = f'{os.environ[\"CUDA_HOME\"]}/bin:{os.environ[\"PATH\"]}'\n", "os.environ[\"LD_LIBRARY_PATH\"] = f'{os.environ[\"CUDA_HOME\"]}/lib64:{os.environ[\"LD_LIBRARY_PATH\"]}'\n", "# メモリ不足によるクラッシュを防ぐため、並列ビルド数を制限\n", "os.environ[\"MAX_JOBS\"] = \"2\"\n", "\n", "def run_cmd(cmd, cwd=None, check=True):\n", " \"\"\"コマンド実行用のヘルパー関数\"\"\"\n", " return subprocess.run(cmd, cwd=cwd, capture_output=True, text=True, check=check)\n", "\n", "def install_submodule(name, url, base_dir):\n", " \"\"\"個別のサブモジュールをインストール\"\"\"\n", " print(f\"\\n{'='*70}\")\n", " print(f\"Installing {name}\")\n", " print(f\"{'='*70}\")\n", "\n", " # 絶対パスを使用\n", " path = os.path.abspath(os.path.join(base_dir, \"submodules\", name))\n", " print(f\" > Target path: {path}\")\n", "\n", " # Step 1: 既存を削除\n", " if os.path.exists(path):\n", " print(f\" > Removing old {name}...\")\n", " shutil.rmtree(path)\n", "\n", " # Step 2: クローン\n", " print(f\" > Cloning from {url}...\")\n", " os.makedirs(os.path.dirname(path), exist_ok=True)\n", " try:\n", " run_cmd([\"git\", \"clone\", url, path])\n", " except subprocess.CalledProcessError as e:\n", " print(f\"❌ Failed to clone {name}\")\n", " print(e.stderr)\n", " return False\n", "\n", " # Step 3: ファイル確認 (spatial.cu 等の存在をチェック)\n", " print(f\" > Checking cloned files...\")\n", " files = os.listdir(path)\n", " print(f\" > Files in {name}: {files[:10]}...\")\n", "\n", " # Step 4: 特定モジュールのサブモジュール初期化\n", " if name == 
\"diff-surfel-rasterization\":\n", " print(f\" > Initializing GLM submodule...\")\n", " run_cmd([\"git\", \"submodule\", \"update\", \"--init\", \"--recursive\"], cwd=path)\n", "\n", " # Step 5: ビルドキャッシュ削除\n", " build_dir = os.path.join(path, \"build\")\n", " if os.path.exists(build_dir):\n", " print(f\" > Cleaning build cache...\")\n", " shutil.rmtree(build_dir)\n", "\n", " # Step 6: インストール\n", " print(f\" > Installing {name} (This may take a few minutes)...\")\n", " # 環境変数を明示的に引き継ぐ\n", " current_env = os.environ.copy()\n", "\n", " result = subprocess.run(\n", " [sys.executable, \"-m\", \"pip\", \"install\", \"-e\", \".\", \"--no-build-isolation\", \"-v\"],\n", " cwd=path,\n", " env=current_env,\n", " capture_output=True,\n", " text=True\n", " )\n", "\n", " if result.returncode != 0:\n", " print(f\"❌ Failed to install {name}\")\n", " # C++/CUDAのビルドエラーは stdout に出ることが多いため、両方出力\n", " print(\"\\n--- STDOUT (Build Logs) ---\")\n", " stdout_lines = result.stdout.split('\\n')\n", " print('\\n'.join(stdout_lines[-60:])) # 最後の60行を表示\n", "\n", " print(\"\\n--- STDERR (Error Details) ---\")\n", " print(result.stderr)\n", " return False\n", "\n", " print(f\"✅ Successfully installed {name}\")\n", " return True\n", "\n", "# =====================================================================\n", "# STEP 4: Build 2D GS submodules\n", "# =====================================================================\n", "print(\"\\n\" + \"=\"*70)\n", "print(\"STEP 4: Build Gaussian Splatting submodules\")\n", "print(\"=\"*70)\n", "\n", "# Colabの場合は絶対パス\n", "WORK_DIR = \"/content/2d-gaussian-splatting\"\n", "\n", "# 各サブモジュールのインストール\n", "# simple-knn\n", "success_knn = install_submodule(\n", " \"simple-knn\",\n", " \"https://github.com/tztechno/simple-knn.git\",\n", " WORK_DIR\n", ")\n", "\n", "\n", "# 結果表示\n", "print(\"\\n\" + \"=\"*70)\n", "print(\"Installation Summary\")\n", "print(\"=\"*70)\n", "print(f\"simple-knn: {'✅ Success' if success_knn else '❌ Failed'}\")" ], "metadata": { 
"colab": { "base_uri": "https://localhost:8080/" }, "id": "qYgJl2Fw_Phk", "outputId": "58d7c749-fe3c-44b5-a64a-214f57dda063" }, "id": "qYgJl2Fw_Phk", "execution_count": 36, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ "Configuring build environment...\n", "nvcc: NVIDIA (R) Cuda compiler driver\n", "Copyright (c) 2005-2025 NVIDIA Corporation\n", "Built on Fri_Feb_21_20:23:50_PST_2025\n", "Cuda compilation tools, release 12.8, V12.8.93\n", "Build cuda_12.8.r12.8/compiler.35583870_0\n", "\u001b[33mDEPRECATION: Loading egg at /usr/local/lib/python3.12/dist-packages/diff_surfel_rasterization-0.0.1-py3.12-linux-x86_64.egg is deprecated. pip 24.3 will enforce this behaviour change. A possible replacement is to use pip for package installation. Discussion can be found at https://github.com/pypa/pip/issues/12330\u001b[0m\u001b[33m\n", "\u001b[0mRequirement already satisfied: setuptools in /usr/local/lib/python3.12/dist-packages (75.2.0)\n", "Requirement already satisfied: wheel in /usr/local/lib/python3.12/dist-packages (0.46.3)\n", "Requirement already satisfied: ninja in /usr/local/lib/python3.12/dist-packages (1.13.0)\n", "Requirement already satisfied: packaging>=24.0 in /usr/local/lib/python3.12/dist-packages (from wheel) (26.0)\n", "\n", "======================================================================\n", "STEP 4: Build Gaussian Splatting submodules\n", "======================================================================\n", "\n", "======================================================================\n", "Installing simple-knn\n", "======================================================================\n", " > Target path: /content/2d-gaussian-splatting/submodules/simple-knn\n", " > Removing old simple-knn...\n", " > Cloning from https://github.com/tztechno/simple-knn.git...\n", " > Checking cloned files...\n", " > Files in simple-knn: ['.git', 'setup.py', 'simple_knn.h', 'simple_knn', 'README.md', 'spatial.h', 'simple_knn0.cu', 
def setup_2dgs_environment():
    """Set up the 2DGS environment (full version).

    Installs the extra Python dependencies, clones the 2D Gaussian
    Splatting repository into WORK_DIR if missing, initializes its git
    submodules, and builds them.
    """
    print("Setting up 2DGS environment...")

    # Install every required package.
    packages = [
        'plyfile',
        'mediapy',
        'open3d',
    ]

    for pkg in packages:
        print(f"Installing {pkg}...")
        # Fix: invoke pip via the running interpreter so packages land in
        # this kernel's environment (a bare `pip` may target a different
        # Python on some systems).
        subprocess.run([sys.executable, '-m', 'pip', 'install', pkg], check=True)

    # Clone the 2DGS repository if it is not already present.
    if not os.path.exists(WORK_DIR):
        subprocess.run([
            'git', 'clone', '--recursive',
            'https://github.com/hbb1/2d-gaussian-splatting.git',
            WORK_DIR
        ], check=True)

    subprocess.run(['git', 'submodule', 'update', '--init', '--recursive'],
                   cwd=WORK_DIR, check=True)

    # NOTE(review): build_2dgs_submodules is not defined anywhere in this
    # notebook chunk — calling this function will raise NameError unless it
    # is defined in another cell. TODO: confirm or define it.
    build_2dgs_submodules()

    print("✅ 2DGS environment setup complete")
Discussion can be found at https://github.com/pypa/pip/issues/12330\u001b[0m\u001b[33m\n", "\u001b[0mRequirement already satisfied: open3d in /usr/local/lib/python3.12/dist-packages (0.19.0)\n", "Requirement already satisfied: numpy>=1.18.0 in /usr/local/lib/python3.12/dist-packages (from open3d) (2.4.2)\n", "Requirement already satisfied: dash>=2.6.0 in /usr/local/lib/python3.12/dist-packages (from open3d) (4.0.0)\n", "Requirement already satisfied: werkzeug>=3.0.0 in /usr/local/lib/python3.12/dist-packages (from open3d) (3.1.5)\n", "Requirement already satisfied: flask>=3.0.0 in /usr/local/lib/python3.12/dist-packages (from open3d) (3.1.2)\n", "Requirement already satisfied: nbformat>=5.7.0 in /usr/local/lib/python3.12/dist-packages (from open3d) (5.10.4)\n", "Requirement already satisfied: configargparse in /usr/local/lib/python3.12/dist-packages (from open3d) (1.7.1)\n", "Requirement already satisfied: ipywidgets>=8.0.4 in /usr/local/lib/python3.12/dist-packages (from open3d) (8.1.8)\n", "Requirement already satisfied: addict in /usr/local/lib/python3.12/dist-packages (from open3d) (2.4.0)\n", "Requirement already satisfied: pillow>=9.3.0 in /usr/local/lib/python3.12/dist-packages (from open3d) (11.3.0)\n", "Requirement already satisfied: matplotlib>=3 in /usr/local/lib/python3.12/dist-packages (from open3d) (3.10.0)\n", "Requirement already satisfied: pandas>=1.0 in /usr/local/lib/python3.12/dist-packages (from open3d) (2.2.2)\n", "Requirement already satisfied: pyyaml>=5.4.1 in /usr/local/lib/python3.12/dist-packages (from open3d) (6.0.3)\n", "Requirement already satisfied: scikit-learn>=0.21 in /usr/local/lib/python3.12/dist-packages (from open3d) (1.6.1)\n", "Requirement already satisfied: tqdm in /usr/local/lib/python3.12/dist-packages (from open3d) (4.67.3)\n", "Requirement already satisfied: pyquaternion in /usr/local/lib/python3.12/dist-packages (from open3d) (0.9.9)\n", "Requirement already satisfied: plotly>=5.0.0 in 
/usr/local/lib/python3.12/dist-packages (from dash>=2.6.0->open3d) (5.24.1)\n", "Requirement already satisfied: importlib-metadata in /usr/local/lib/python3.12/dist-packages (from dash>=2.6.0->open3d) (8.7.1)\n", "Requirement already satisfied: typing_extensions>=4.1.1 in /usr/local/lib/python3.12/dist-packages (from dash>=2.6.0->open3d) (4.15.0)\n", "Requirement already satisfied: requests in /usr/local/lib/python3.12/dist-packages (from dash>=2.6.0->open3d) (2.32.4)\n", "Requirement already satisfied: retrying in /usr/local/lib/python3.12/dist-packages (from dash>=2.6.0->open3d) (1.4.2)\n", "Requirement already satisfied: nest-asyncio in /usr/local/lib/python3.12/dist-packages (from dash>=2.6.0->open3d) (1.6.0)\n", "Requirement already satisfied: setuptools in /usr/local/lib/python3.12/dist-packages (from dash>=2.6.0->open3d) (75.2.0)\n", "Requirement already satisfied: blinker>=1.9.0 in /usr/local/lib/python3.12/dist-packages (from flask>=3.0.0->open3d) (1.9.0)\n", "Requirement already satisfied: click>=8.1.3 in /usr/local/lib/python3.12/dist-packages (from flask>=3.0.0->open3d) (8.3.1)\n", "Requirement already satisfied: itsdangerous>=2.2.0 in /usr/local/lib/python3.12/dist-packages (from flask>=3.0.0->open3d) (2.2.0)\n", "Requirement already satisfied: jinja2>=3.1.2 in /usr/local/lib/python3.12/dist-packages (from flask>=3.0.0->open3d) (3.1.6)\n", "Requirement already satisfied: markupsafe>=2.1.1 in /usr/local/lib/python3.12/dist-packages (from flask>=3.0.0->open3d) (3.0.3)\n", "Requirement already satisfied: comm>=0.1.3 in /usr/local/lib/python3.12/dist-packages (from ipywidgets>=8.0.4->open3d) (0.2.3)\n", "Requirement already satisfied: ipython>=6.1.0 in /usr/local/lib/python3.12/dist-packages (from ipywidgets>=8.0.4->open3d) (7.34.0)\n", "Requirement already satisfied: traitlets>=4.3.1 in /usr/local/lib/python3.12/dist-packages (from ipywidgets>=8.0.4->open3d) (5.7.1)\n", "Requirement already satisfied: widgetsnbextension~=4.0.14 in 
/usr/local/lib/python3.12/dist-packages (from ipywidgets>=8.0.4->open3d) (4.0.15)\n", "Requirement already satisfied: jupyterlab_widgets~=3.0.15 in /usr/local/lib/python3.12/dist-packages (from ipywidgets>=8.0.4->open3d) (3.0.16)\n", "Requirement already satisfied: contourpy>=1.0.1 in /usr/local/lib/python3.12/dist-packages (from matplotlib>=3->open3d) (1.3.3)\n", "Requirement already satisfied: cycler>=0.10 in /usr/local/lib/python3.12/dist-packages (from matplotlib>=3->open3d) (0.12.1)\n", "Requirement already satisfied: fonttools>=4.22.0 in /usr/local/lib/python3.12/dist-packages (from matplotlib>=3->open3d) (4.61.1)\n", "Requirement already satisfied: kiwisolver>=1.3.1 in /usr/local/lib/python3.12/dist-packages (from matplotlib>=3->open3d) (1.4.9)\n", "Requirement already satisfied: packaging>=20.0 in /usr/local/lib/python3.12/dist-packages (from matplotlib>=3->open3d) (26.0)\n", "Requirement already satisfied: pyparsing>=2.3.1 in /usr/local/lib/python3.12/dist-packages (from matplotlib>=3->open3d) (3.3.2)\n", "Requirement already satisfied: python-dateutil>=2.7 in /usr/local/lib/python3.12/dist-packages (from matplotlib>=3->open3d) (2.9.0.post0)\n", "Requirement already satisfied: fastjsonschema>=2.15 in /usr/local/lib/python3.12/dist-packages (from nbformat>=5.7.0->open3d) (2.21.2)\n", "Requirement already satisfied: jsonschema>=2.6 in /usr/local/lib/python3.12/dist-packages (from nbformat>=5.7.0->open3d) (4.26.0)\n", "Requirement already satisfied: jupyter-core!=5.0.*,>=4.12 in /usr/local/lib/python3.12/dist-packages (from nbformat>=5.7.0->open3d) (5.9.1)\n", "Requirement already satisfied: pytz>=2020.1 in /usr/local/lib/python3.12/dist-packages (from pandas>=1.0->open3d) (2025.2)\n", "Requirement already satisfied: tzdata>=2022.7 in /usr/local/lib/python3.12/dist-packages (from pandas>=1.0->open3d) (2025.3)\n", "Requirement already satisfied: scipy>=1.6.0 in /usr/local/lib/python3.12/dist-packages (from scikit-learn>=0.21->open3d) (1.16.3)\n", "Requirement 
already satisfied: joblib>=1.2.0 in /usr/local/lib/python3.12/dist-packages (from scikit-learn>=0.21->open3d) (1.5.3)\n", "Requirement already satisfied: threadpoolctl>=3.1.0 in /usr/local/lib/python3.12/dist-packages (from scikit-learn>=0.21->open3d) (3.6.0)\n", "Requirement already satisfied: jedi>=0.16 in /usr/local/lib/python3.12/dist-packages (from ipython>=6.1.0->ipywidgets>=8.0.4->open3d) (0.19.2)\n", "Requirement already satisfied: decorator in /usr/local/lib/python3.12/dist-packages (from ipython>=6.1.0->ipywidgets>=8.0.4->open3d) (4.4.2)\n", "Requirement already satisfied: pickleshare in /usr/local/lib/python3.12/dist-packages (from ipython>=6.1.0->ipywidgets>=8.0.4->open3d) (0.7.5)\n", "Requirement already satisfied: prompt-toolkit!=3.0.0,!=3.0.1,<3.1.0,>=2.0.0 in /usr/local/lib/python3.12/dist-packages (from ipython>=6.1.0->ipywidgets>=8.0.4->open3d) (3.0.52)\n", "Requirement already satisfied: pygments in /usr/local/lib/python3.12/dist-packages (from ipython>=6.1.0->ipywidgets>=8.0.4->open3d) (2.19.2)\n", "Requirement already satisfied: backcall in /usr/local/lib/python3.12/dist-packages (from ipython>=6.1.0->ipywidgets>=8.0.4->open3d) (0.2.0)\n", "Requirement already satisfied: matplotlib-inline in /usr/local/lib/python3.12/dist-packages (from ipython>=6.1.0->ipywidgets>=8.0.4->open3d) (0.2.1)\n", "Requirement already satisfied: pexpect>4.3 in /usr/local/lib/python3.12/dist-packages (from ipython>=6.1.0->ipywidgets>=8.0.4->open3d) (4.9.0)\n", "Requirement already satisfied: attrs>=22.2.0 in /usr/local/lib/python3.12/dist-packages (from jsonschema>=2.6->nbformat>=5.7.0->open3d) (25.4.0)\n", "Requirement already satisfied: jsonschema-specifications>=2023.03.6 in /usr/local/lib/python3.12/dist-packages (from jsonschema>=2.6->nbformat>=5.7.0->open3d) (2025.9.1)\n", "Requirement already satisfied: referencing>=0.28.4 in /usr/local/lib/python3.12/dist-packages (from jsonschema>=2.6->nbformat>=5.7.0->open3d) (0.37.0)\n", "Requirement already satisfied: 
# Re-run rendering (the first attempt failed with
# ModuleNotFoundError: trimesh, installed in a cell above).
import subprocess
import sys

result = subprocess.run(
    # Fix: use sys.executable instead of a hardcoded /usr/bin/python3 so
    # render.py runs under the same interpreter/environment as this kernel.
    # NOTE(review): both --skip_test and --skip_train are passed, which in
    # the 2DGS render script skips both camera sets — confirm this still
    # produces the intended mesh/video output.
    [sys.executable, 'render.py',
     '-m', '/content/2d-gaussian-splatting/output/video',
     '--iteration', '1000',
     '--skip_test',
     '--skip_train'],
    cwd='/content/2d-gaussian-splatting',
    capture_output=True,
    text=True
)

print("=== STDOUT ===")
print(result.stdout)
print("\n=== STDERR ===")
print(result.stderr)
print(f"\n=== EXIT CODE: {result.returncode} ===")

# Toolchain / framework version check (was `!nvcc --version`).
subprocess.run(["nvcc", "--version"])
import torch
print(torch.__version__)
print(torch.version.cuda)
import os
import glob
import cv2
import numpy as np
from PIL import Image

# =========================================================
# Utility: two aspect-preserving square crops per image
# (the original banner said "black padding", but this code crops)
# =========================================================

def normalize_image_sizes_biplet(input_dir, output_dir=None, size=1024, max_images=None):
    """
    Generate two square crops (Left/Right for landscape, Top/Bottom for
    portrait) from each image in a directory.

    Args:
        input_dir: Input directory containing source images
        output_dir: Output directory for processed images
                    (default: 'output/images_biplet')
        size: Target square size in pixels (default: 1024)
        max_images: Maximum number of SOURCE images to process
                    (default: None = all images)

    Returns:
        (output_dir, generated_paths): output directory and the list of
        file paths written — two per successfully processed source image.
    """
    if output_dir is None:
        output_dir = 'output/images_biplet'
    os.makedirs(output_dir, exist_ok=True)

    print(f"--- Step 1: Biplet-Square Normalization ---")
    print(f"Generating 2 cropped squares (Left/Right or Top/Bottom) for each image...")
    print()

    generated_paths = []
    converted_count = 0
    size_stats = {}

    # Sort for consistent processing order
    image_files = sorted([f for f in os.listdir(input_dir)
                          if f.lower().endswith(('.jpg', '.jpeg', '.png'))])

    # Cap the number of SOURCE images when requested (2 crops per image).
    if max_images is not None:
        image_files = image_files[:max_images]
        print(f"Processing limited to {max_images} source images (will generate {max_images * 2} cropped images)")

    for img_file in image_files:
        input_path = os.path.join(input_dir, img_file)
        try:
            # Fix: open via context manager so the file handle is released
            # even on error (the original leaked one handle per image).
            with Image.open(input_path) as img:
                original_size = img.size

                # Track the distribution of original sizes for reporting.
                size_key = f"{original_size[0]}x{original_size[1]}"
                size_stats[size_key] = size_stats.get(size_key, 0) + 1

                # resize() forces a full load, so the crops stay valid
                # after the source file is closed.
                crops = generate_two_crops(img, size)

            base_name, ext = os.path.splitext(img_file)
            for mode, cropped_img in crops.items():
                output_path = os.path.join(output_dir, f"{base_name}_{mode}{ext}")
                cropped_img.save(output_path, quality=95)
                generated_paths.append(output_path)

            converted_count += 1
            print(f" ✓ {img_file}: {original_size} → 2 square images generated")

        except Exception as e:
            print(f" ✗ Error processing {img_file}: {e}")

    print(f"\nProcessing complete: {converted_count} source images processed")
    print(f"Total output images: {len(generated_paths)}")
    print(f"Original size distribution: {size_stats}")

    return output_dir, generated_paths


def generate_two_crops(img, size):
    """
    Crop `img` to a square at two positions and resize each to (size, size).

    Landscape images yield 'left'/'right' crops; portrait or exactly-square
    images yield 'top'/'bottom' crops.

    Returns:
        dict mapping mode name ('left'/'right' or 'top'/'bottom') to a
        PIL.Image of shape (size, size).
    """
    width, height = img.size
    crop_size = min(width, height)
    crops = {}

    if width > height:
        # Landscape → Left & Right
        positions = {
            'left': 0,
            'right': width - crop_size
        }
        for mode, x_offset in positions.items():
            box = (x_offset, 0, x_offset + crop_size, crop_size)
            crops[mode] = img.crop(box).resize(
                (size, size),
                Image.Resampling.LANCZOS
            )
    else:
        # Portrait or Square → Top & Bottom
        positions = {
            'top': 0,
            'bottom': height - crop_size
        }
        for mode, y_offset in positions.items():
            box = (0, y_offset, crop_size, y_offset + crop_size)
            crops[mode] = img.crop(box).resize(
                (size, size),
                Image.Resampling.LANCZOS
            )

    return crops
def run_colmap_reconstruction(image_dir, colmap_dir):
    """Estimate camera poses and a sparse 3D point cloud with COLMAP.

    Pipeline: feature_extractor → exhaustive_matcher → mapper →
    model_converter (TXT export).

    Args:
        image_dir: Directory containing the input images.
        colmap_dir: Working directory for the COLMAP database and sparse model.

    Returns:
        Path to the sparse model directory (TXT format).

    Raises:
        FileNotFoundError: If the mapper produced no model.
        subprocess.CalledProcessError: If any COLMAP step fails (check=True).
    """
    print("Running SfM reconstruction with COLMAP...")

    database_path = os.path.join(colmap_dir, "database.db")
    sparse_dir = os.path.join(colmap_dir, "sparse")
    os.makedirs(sparse_dir, exist_ok=True)

    # Headless Qt platform so COLMAP runs without a display (Colab/Kaggle).
    env = os.environ.copy()
    env['QT_QPA_PLATFORM'] = 'offscreen'

    # Feature extraction
    print("1/4: Extracting features...")
    subprocess.run([
        'colmap', 'feature_extractor',
        '--database_path', database_path,
        '--image_path', image_dir,
        '--ImageReader.single_camera', '1',
        '--ImageReader.camera_model', 'OPENCV',
        '--SiftExtraction.use_gpu', '0'  # Use CPU
    ], check=True, env=env)

    # Feature matching
    print("2/4: Matching features...")
    subprocess.run([
        'colmap', 'exhaustive_matcher',
        '--database_path', database_path,
        '--SiftMatching.use_gpu', '0'  # Use CPU
    ], check=True, env=env)

    # Sparse reconstruction
    print("3/4: Sparse reconstruction...")
    subprocess.run([
        'colmap', 'mapper',
        '--database_path', database_path,
        '--image_path', image_dir,
        '--output_path', sparse_dir,
        '--Mapper.ba_global_max_num_iterations', '20',  # Speed up
        '--Mapper.ba_local_max_num_iterations', '10'
    ], check=True, env=env)

    # Export to text format
    print("4/4: Exporting to text format...")
    model_dir = os.path.join(sparse_dir, '0')
    if not os.path.exists(model_dir):
        # Fall back to the first model COLMAP produced, if any.
        subdirs = [d for d in os.listdir(sparse_dir)
                   if os.path.isdir(os.path.join(sparse_dir, d))]
        if subdirs:
            model_dir = os.path.join(sparse_dir, subdirs[0])
        else:
            raise FileNotFoundError("COLMAP reconstruction failed")

    subprocess.run([
        'colmap', 'model_converter',
        '--input_path', model_dir,
        '--output_path', model_dir,
        '--output_type', 'TXT'
    ], check=True, env=env)

    print(f"COLMAP reconstruction complete: {model_dir}")
    return model_dir


def convert_cameras_to_pinhole(input_file, output_file):
    """Rewrite a COLMAP cameras.txt so every camera uses the PINHOLE model.

    PINHOLE lines pass through unchanged; OPENCV cameras keep their
    fx/fy/cx/cy (distortion coefficients are dropped); any other model gets
    a synthetic pinhole with fx = fy = max(width, height) and a centered
    principal point. Comment and blank lines are copied verbatim.

    Args:
        input_file: Path to the source cameras.txt.
        output_file: Path to write the converted cameras.txt.
    """
    print(f"Reading camera file: {input_file}")

    with open(input_file, 'r') as f:
        lines = f.readlines()

    converted_count = 0
    with open(output_file, 'w') as f:
        for line in lines:
            if line.startswith('#') or line.strip() == '':
                f.write(line)
            else:
                parts = line.strip().split()
                if len(parts) >= 4:
                    cam_id = parts[0]
                    model = parts[1]
                    width = parts[2]
                    height = parts[3]
                    params = parts[4:]

                    # Convert to PINHOLE format
                    if model == "PINHOLE":
                        f.write(line)
                    elif model == "OPENCV":
                        # OPENCV params: fx, fy, cx, cy, k1, k2, p1, p2
                        fx = params[0]
                        fy = params[1]
                        cx = params[2]
                        cy = params[3]
                        f.write(f"{cam_id} PINHOLE {width} {height} {fx} {fy} {cx} {cy}\n")
                        converted_count += 1
                    else:
                        # Other models: approximate with a centered pinhole.
                        fx = fy = max(float(width), float(height))
                        cx = float(width) / 2
                        cy = float(height) / 2
                        f.write(f"{cam_id} PINHOLE {width} {height} {fx} {fy} {cx} {cy}\n")
                        converted_count += 1
                else:
                    f.write(line)

    print(f"Converted {converted_count} cameras to PINHOLE format")


def prepare_gaussian_splatting_data(image_dir, colmap_model_dir):
    """Assemble the {WORK_DIR}/data/video layout expected by 2DGS train.py.

    Copies images into data/video/images, converts cameras.txt to PINHOLE,
    and copies images.txt / points3D.txt into data/video/sparse/0.

    Args:
        image_dir: Directory of (already normalized) input images.
        colmap_model_dir: TXT-format COLMAP model directory.

    Returns:
        The prepared data directory path.
    """
    print("Preparing data for Gaussian Splatting...")

    data_dir = f"{WORK_DIR}/data/video"
    os.makedirs(f"{data_dir}/sparse/0", exist_ok=True)
    os.makedirs(f"{data_dir}/images", exist_ok=True)

    # Copy images
    print("Copying images...")
    img_count = 0
    for img_file in os.listdir(image_dir):
        if img_file.lower().endswith(('.jpg', '.jpeg', '.png')):
            shutil.copy(
                os.path.join(image_dir, img_file),
                f"{data_dir}/images/{img_file}"
            )
            img_count += 1
    print(f"Copied {img_count} images")

    # Convert and copy camera file to PINHOLE format
    print("Converting camera model to PINHOLE format...")
    convert_cameras_to_pinhole(
        os.path.join(colmap_model_dir, 'cameras.txt'),
        f"{data_dir}/sparse/0/cameras.txt"
    )

    # Copy the remaining model files; fix: destination and log messages use
    # the actual filename (the source had a corrupted placeholder here).
    for filename in ['images.txt', 'points3D.txt']:
        src = os.path.join(colmap_model_dir, filename)
        dst = f"{data_dir}/sparse/0/{filename}"
        if os.path.exists(src):
            shutil.copy(src, dst)
            print(f"Copied {filename}")
        else:
            print(f"Warning: {filename} not found")

    print(f"Data preparation complete: {data_dir}")
    return data_dir
###############################################################

def train_gaussian_splatting(data_dir, iterations=7000,
                             lambda_normal=0.05,
                             lambda_dist=0,
                             depth_ratio=0):
    """Launch 2DGS training via train.py in WORK_DIR.

    Args:
        data_dir: COLMAP-formatted dataset directory (passed as -s).
        iterations: Number of training iterations.
        lambda_normal: Normal-consistency regularization weight (default 0.05).
        lambda_dist: Depth-distortion regularization weight (default 0).
        depth_ratio: 0 = mean depth, 1 = median depth (default 0).

    Returns:
        The output model directory path.
    """
    model_path = f"{WORK_DIR}/output/video"
    train_cmd = [
        sys.executable, 'train.py',
        '-s', data_dir,
        '-m', model_path,
        '--iterations', str(iterations),
        '--lambda_normal', str(lambda_normal),
        '--lambda_dist', str(lambda_dist),
        '--depth_ratio', str(depth_ratio),
        '--eval',
    ]
    subprocess.run(train_cmd, cwd=WORK_DIR, check=True)
    return model_path


def render_video_and_mesh(model_path, output_video_path, iteration=1000,
                          extract_mesh=False, unbounded=False, mesh_res=1024):
    """Render a trained 2DGS model and assemble the frames into an MP4.

    Args:
        model_path: Trained model directory (passed as -m to render.py).
        output_video_path: Destination MP4 path.
        iteration: Checkpoint iteration to render.
        extract_mesh: Also pass mesh-extraction options to render.py.
        unbounded: Use unbounded mesh extraction (only with extract_mesh).
        mesh_res: Mesh resolution (only with extract_mesh).

    Returns:
        True when a video was written, False when no rendered frames were found.

    Raises:
        subprocess.CalledProcessError: If render.py or ffmpeg exits non-zero.
    """
    render_cmd = [
        sys.executable, 'render.py',
        '-m', model_path,
        '--iteration', str(iteration),
        '--skip_test',
        '--skip_train',
    ]
    if extract_mesh:
        if unbounded:
            render_cmd.append('--unbounded')
        render_cmd += ['--mesh_res', str(mesh_res)]

    # Capture output so a failing render.py can be diagnosed from the log.
    completed = subprocess.run(
        render_cmd,
        cwd=WORK_DIR,
        capture_output=True,
        text=True,
    )
    if completed.returncode != 0:
        print("❌ STDOUT:", completed.stdout)
        print("❌ STDERR:", completed.stderr)
        raise subprocess.CalledProcessError(
            completed.returncode, render_cmd, completed.stdout, completed.stderr
        )

    # render.py writes under test/ or train/ depending on the split flags.
    render_dir = None
    for candidate in (f"{model_path}/test/ours_{iteration}/renders",
                      f"{model_path}/train/ours_{iteration}/renders"):
        if os.path.exists(candidate):
            render_dir = candidate
            print(f"✅ Rendering directory found: {render_dir}")
            break

    if render_dir and os.path.exists(render_dir):
        frame_names = sorted(f for f in os.listdir(render_dir)
                             if f.endswith('.png'))
        if frame_names:
            print(f"Found {len(frame_names)} rendered images")
            # Encode the frames with ffmpeg.
            subprocess.run([
                'ffmpeg', '-y',
                '-framerate', '30',
                '-pattern_type', 'glob',
                '-i', f"{render_dir}/*.png",
                '-c:v', 'libx264',
                '-pix_fmt', 'yuv420p',
                '-crf', '18',
                output_video_path
            ], check=True)
            print(f"✅ Video saved: {output_video_path}")
            return True

    print("❌ Error: Rendering directory not found")
    return False


###############################################################

def create_gif(video_path, gif_path):
    """Create a looping, slowed-down animated GIF from an MP4 via ffmpeg.

    Args:
        video_path: Source MP4 path.
        gif_path: Destination GIF path.

    Returns:
        True if the GIF file exists after conversion, else False.
    """
    print("Creating animated GIF...")

    subprocess.run([
        'ffmpeg', '-y',
        '-i', video_path,
        '-vf', 'setpts=8*PTS,fps=10,scale=720:-1:flags=lanczos',
        '-loop', '0',
        gif_path
    ], check=True)

    if os.path.exists(gif_path):
        size_mb = os.path.getsize(gif_path) / (1024 * 1024)
        print(f"GIF creation complete: {gif_path} ({size_mb:.2f} MB)")
        return True

    return False
def main_pipeline(image_dir, output_dir, square_size=1024, max_images=100):
    """End-to-end pipeline: crop/normalize → COLMAP SfM → 2DGS training.

    Args:
        image_dir: Directory of source images.
        output_dir: Final output directory (currently unused downstream).
        square_size: Side length of the generated square crops.
        max_images: Max number of SOURCE images (two crops generated each).

    Returns:
        The trained model path on success, or None if any step failed.
    """
    try:
        # Step 1: Normalize and preprocess images
        print("="*60)
        print("Step 1: Normalizing and preprocessing images")
        print("="*60)

        frame_dir = os.path.join(COLMAP_DIR, "images")
        os.makedirs(frame_dir, exist_ok=True)

        # Fix: normalize_image_sizes_biplet returns (output_dir, paths);
        # the original bound the whole tuple and printed it as a count.
        _, generated_paths = normalize_image_sizes_biplet(
            input_dir=image_dir,
            output_dir=frame_dir,  # write directly into colmap/images
            size=square_size,
            max_images=max_images
        )
        print(f"Processed {len(generated_paths)} images")

        # Step 2: Estimate camera info with COLMAP
        print("="*60)
        print("Step 2: Running COLMAP reconstruction")
        print("="*60)
        colmap_model_dir = run_colmap_reconstruction(frame_dir, COLMAP_DIR)

        # Step 3: Prepare data for Gaussian Splatting
        print("="*60)
        print("Step 3: Preparing Gaussian Splatting data")
        print("="*60)
        data_dir = prepare_gaussian_splatting_data(frame_dir, colmap_model_dir)

        # Step 4: Train model
        print("="*60)
        print("Step 4: Training Gaussian Splatting model")
        print("="*60)
        model_path = train_gaussian_splatting(
            data_dir,
            iterations=1000,
            lambda_normal=0.05,
            lambda_dist=0,
            depth_ratio=0
        )
        print(f"Model trained at: {model_path}")

        # Fix: surface the result to the caller (previously always None).
        return model_path

    except Exception as e:
        print(f"❌ Pipeline failed: {e}")
        import traceback
        traceback.print_exc()
        return None


if __name__ == "__main__":
    IMAGE_DIR = "/content/drive/MyDrive/your_folder/fountain100"
    OUTPUT_DIR = "/content/output"
    COLMAP_DIR = "/content/colmap_workspace"

    ply_path = main_pipeline(
        image_dir=IMAGE_DIR,
        output_dir=OUTPUT_DIR,
        square_size=1024,
        max_images=20
    )
→ 2 square images generated\n", " ✓ image_109.jpeg: (1440, 1920) → 2 square images generated\n", " ✓ image_110.jpeg: (1440, 1920) → 2 square images generated\n", " ✓ image_111.jpeg: (1440, 1920) → 2 square images generated\n", " ✓ image_112.jpeg: (1440, 1920) → 2 square images generated\n", " ✓ image_113.jpeg: (1440, 1920) → 2 square images generated\n", " ✓ image_114.jpeg: (1440, 1920) → 2 square images generated\n", " ✓ image_115.jpeg: (1440, 1920) → 2 square images generated\n", " ✓ image_116.jpeg: (1440, 1920) → 2 square images generated\n", " ✓ image_117.jpeg: (1440, 1920) → 2 square images generated\n", " ✓ image_118.jpeg: (1440, 1920) → 2 square images generated\n", " ✓ image_119.jpeg: (1440, 1920) → 2 square images generated\n", " ✓ image_120.jpeg: (1440, 1920) → 2 square images generated\n", "\n", "Processing complete: 20 source images processed\n", "Total output images: 40\n", "Original size distribution: {'1440x1920': 20}\n", "Processed ('/content/colmap_workspace/images', ['/content/colmap_workspace/images/image_101_top.jpeg', '/content/colmap_workspace/images/image_101_bottom.jpeg', '/content/colmap_workspace/images/image_102_top.jpeg', '/content/colmap_workspace/images/image_102_bottom.jpeg', '/content/colmap_workspace/images/image_103_top.jpeg', '/content/colmap_workspace/images/image_103_bottom.jpeg', '/content/colmap_workspace/images/image_104_top.jpeg', '/content/colmap_workspace/images/image_104_bottom.jpeg', '/content/colmap_workspace/images/image_105_top.jpeg', '/content/colmap_workspace/images/image_105_bottom.jpeg', '/content/colmap_workspace/images/image_106_top.jpeg', '/content/colmap_workspace/images/image_106_bottom.jpeg', '/content/colmap_workspace/images/image_107_top.jpeg', '/content/colmap_workspace/images/image_107_bottom.jpeg', '/content/colmap_workspace/images/image_108_top.jpeg', '/content/colmap_workspace/images/image_108_bottom.jpeg', '/content/colmap_workspace/images/image_109_top.jpeg', 
'/content/colmap_workspace/images/image_109_bottom.jpeg', '/content/colmap_workspace/images/image_110_top.jpeg', '/content/colmap_workspace/images/image_110_bottom.jpeg', '/content/colmap_workspace/images/image_111_top.jpeg', '/content/colmap_workspace/images/image_111_bottom.jpeg', '/content/colmap_workspace/images/image_112_top.jpeg', '/content/colmap_workspace/images/image_112_bottom.jpeg', '/content/colmap_workspace/images/image_113_top.jpeg', '/content/colmap_workspace/images/image_113_bottom.jpeg', '/content/colmap_workspace/images/image_114_top.jpeg', '/content/colmap_workspace/images/image_114_bottom.jpeg', '/content/colmap_workspace/images/image_115_top.jpeg', '/content/colmap_workspace/images/image_115_bottom.jpeg', '/content/colmap_workspace/images/image_116_top.jpeg', '/content/colmap_workspace/images/image_116_bottom.jpeg', '/content/colmap_workspace/images/image_117_top.jpeg', '/content/colmap_workspace/images/image_117_bottom.jpeg', '/content/colmap_workspace/images/image_118_top.jpeg', '/content/colmap_workspace/images/image_118_bottom.jpeg', '/content/colmap_workspace/images/image_119_top.jpeg', '/content/colmap_workspace/images/image_119_bottom.jpeg', '/content/colmap_workspace/images/image_120_top.jpeg', '/content/colmap_workspace/images/image_120_bottom.jpeg']) images\n", "============================================================\n", "Step 2: Running COLMAP reconstruction\n", "============================================================\n", "Running SfM reconstruction with COLMAP...\n", "1/4: Extracting features...\n", "2/4: Matching features...\n", "3/4: Sparse reconstruction...\n", "4/4: Exporting to text format...\n", "COLMAP reconstruction complete: /content/colmap_workspace/sparse/0\n", "============================================================\n", "Step 3: Preparing Gaussian Splatting data\n", "============================================================\n", "Preparing data for Gaussian Splatting...\n", "Copying images...\n", "Copied 
40 images\n", "Converting camera model to PINHOLE format...\n", "Reading camera file: /content/colmap_workspace/sparse/0/cameras.txt\n", "Converted 1 cameras to PINHOLE format\n", "Copied images.txt\n", "Copied points3D.txt\n", "Data preparation complete: /content/2d-gaussian-splatting/data/video\n", "============================================================\n", "Step 4: Training Gaussian Splatting model\n", "============================================================\n", "Model trained at: /content/2d-gaussian-splatting/output/video\n" ] } ] }, { "cell_type": "code", "source": [], "metadata": { "id": "9GN6Eny2XsAd" }, "id": "9GN6Eny2XsAd", "execution_count": 43, "outputs": [] }, { "cell_type": "markdown", "id": "e17ec719", "metadata": { "papermill": { "duration": 0.49801, "end_time": "2026-01-11T00:00:18.165833", "exception": false, "start_time": "2026-01-11T00:00:17.667823", "status": "completed" }, "tags": [], "id": "e17ec719" }, "source": [] }, { "cell_type": "markdown", "id": "38b3974c", "metadata": { "papermill": { "duration": 0.427583, "end_time": "2026-01-11T00:00:19.008387", "exception": false, "start_time": "2026-01-11T00:00:18.580804", "status": "completed" }, "tags": [], "id": "38b3974c" }, "source": [] } ], "metadata": { "kaggle": { "accelerator": "nvidiaTeslaT4", "dataSources": [ { "databundleVersionId": 5447706, "sourceId": 49349, "sourceType": "competition" }, { "datasetId": 1429416, "sourceId": 14451718, "sourceType": "datasetVersion" } ], "dockerImageVersionId": 31090, "isGpuEnabled": true, "isInternetEnabled": true, "language": "python", "sourceType": "notebook" }, "kernelspec": { "display_name": "Python 3", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.11.13" }, "papermill": { "default_parameters": {}, "duration": 20573.990788, "end_time": 
"2026-01-11T00:00:22.081506", "environment_variables": {}, "exception": null, "input_path": "__notebook__.ipynb", "output_path": "__notebook__.ipynb", "parameters": {}, "start_time": "2026-01-10T18:17:28.090718", "version": "2.6.0" }, "colab": { "provenance": [], "gpuType": "T4" }, "accelerator": "GPU" }, "nbformat": 4, "nbformat_minor": 5 }