feat: PR#10 - EQ-Bench3 ローカルvLLM評価スクリプト
#10
by YUGOROU - opened
- eqbench-ja-run/README.md +91 -0
- eqbench-ja-run/serve_judge.sh +48 -0
- eqbench-ja-run/serve_test.sh +44 -0
- eqbench-ja-run/setup_eqbench_run.sh +134 -0
eqbench-ja-run/README.md
ADDED
|
@@ -0,0 +1,91 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# EQ-Bench3 ローカル評価セットアップ

TeenEmo-LFM2.5-1.2B-DPO を EQ-Bench3 日本語版で評価するセットアップ。
受験者・採点者ともに同一A100インスタンス上のローカルvLLMを使用。

## モデル構成

| 役割 | モデル | VRAM | ポート |
|------|--------|------|--------|
| 受験者 | `LiquidAI/LFM2.5-1.2B-Base` + LoRA `YUGOROU/TeenEmo-LFM2.5-1.2B-DPO` | ~3GB | 8000 |
| 採点者 | `Qwen/Qwen3.5-35B-A3B` | ~70GB | 8001 |

TeenEmoはLoRAアダプタのみ(88.9MB)のため、vLLMはベースモデルを指定して
`--enable-lora --lora-modules` でアダプタを読み込む。

## セットアップ

```bash
export HF_TOKEN="hf_xxxx" && export HF_USERNAME="YUGOROU"
curl -fL -H "Authorization: Bearer ${HF_TOKEN}" \
  "https://huggingface.co/datasets/YUGOROU/Test-2/resolve/main/eqbench-ja-run/setup_eqbench_run.sh" \
  -o /tmp/setup_eqbench_run.sh && bash /tmp/setup_eqbench_run.sh
```

## 同時起動モード(推奨)

```bash
# Step 1: TeenEmo(ベース+LoRA)起動(port 8000)
tmux new-session -d -s eq_run
tmux new-window -t eq_run -n test
tmux send-keys -t eq_run:test "cd /workspace/eqbench-run && export HF_TOKEN='hf_xxxx' && ./serve_test.sh" Enter

# Step 2: 採点者起動(port 8001)
tmux new-window -t eq_run -n judge
tmux send-keys -t eq_run:judge "cd /workspace/eqbench-run && ./serve_judge.sh" Enter

# Step 3: 起動確認
tmux capture-pane -t eq_run:test -p | grep "startup complete"
tmux capture-pane -t eq_run:judge -p | grep "startup complete"

# Step 4: 評価実行
# --test-model はserve_test.shの LORA_NAME(デフォルト: teenemo-dpo)と一致させる
cd /workspace/eqbench-run/eqbench3
python eqbench3.py \
  --test-model teenemo-dpo \
  --model-name TeenEmo-DPO \
  --judge-model Qwen/Qwen3.5-35B-A3B \
  --no-elo \
  --save-interval 1 \
  --iterations 1
```

## 順次実行モード(OOM対策)

`--save-interval 1` で1タスクごとに保存。サーバー切り替え後に再実行すると
完了済みタスクをスキップして再開できる。

```bash
# Phase 1: TeenEmoで応答生成(port 8000のみ)
cd /workspace/eqbench-run && ./serve_test.sh &
cd /workspace/eqbench-run/eqbench3
python eqbench3.py \
  --test-model teenemo-dpo \
  --model-name TeenEmo-DPO \
  --judge-model Qwen/Qwen3.5-35B-A3B \
  --no-elo --save-interval 1 --iterations 1
# Judge API失敗は想定内。--save-interval 1 で応答済みタスクは保存される

# Phase 2: TeenEmo停止→Judge起動(port 8001)で採点のみ再実行
pkill -f "vllm serve LiquidAI" 2>/dev/null || kill $(lsof -t -i:8000) 2>/dev/null || true
JUDGE_GPU_UTIL=0.90 ./serve_judge.sh &
python eqbench3.py \
  --test-model teenemo-dpo \
  --model-name TeenEmo-DPO \
  --judge-model Qwen/Qwen3.5-35B-A3B \
  --no-elo --save-interval 1 --iterations 1
# 完了済みタスクはスキップされ採点のみ実行される
```

## 結果確認

```bash
cat /workspace/eqbench-run/eqbench3/eqbench3_runs.json | python3 -c "
import json, sys
data = json.load(sys.stdin)
for run_id, run in data.items():
    if 'TeenEmo' in run_id:
        print('Run:', run_id)
        print('Score:', run.get('eq_bench_score', 'N/A'))
"
```
|
eqbench-ja-run/serve_judge.sh
ADDED
|
@@ -0,0 +1,48 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
#!/usr/bin/env bash
# =============================================================================
# serve_judge.sh — launch the vLLM server for the judge model
# Model : Qwen/Qwen3.5-35B-A3B (MoE: 35B total weights / 3B active params)
# Port  : 8001
# VRAM  : ~70GB (all weights in bf16)
#
# Memory caveat (A100 80GB, concurrent mode):
#   TeenEmo (serve_test.sh, GPU_UTIL=0.10) + judge (GPU_UTIL=0.88) = ~98%.
#   If this OOMs, stop serve_test.sh first and fall back to the sequential
#   mode described by setup_eqbench_run.sh.
#
# NOTE(review): the model is served with --language-model-only because it is
# published as a VLM (pipeline_tag: image-text-to-text) — confirm this flag
# exists in the installed vLLM version; it is version-dependent.
# =============================================================================

set -euo pipefail

# All knobs are overridable via environment variables (names are the
# script's public interface — do not rename them).
MODEL_ID="${JUDGE_MODEL:-Qwen/Qwen3.5-35B-A3B}"
LISTEN_PORT="${JUDGE_PORT:-8001}"
LISTEN_HOST="${VLLM_HOST:-0.0.0.0}"
MEM_FRACTION="${JUDGE_GPU_UTIL:-0.88}"
CTX_LEN="${JUDGE_MAX_MODEL_LEN:-8192}"
PRECISION="${VLLM_DTYPE:-auto}"
BATCH_SEQS="${VLLM_MAX_NUM_SEQS:-16}"

echo "=== Qwen3.5-35B-A3B 採点者サーバー起動 ==="
echo " モデル : ${MODEL_ID}"
echo " ポート : ${LISTEN_PORT}"
echo " GPU 使用率 : ${MEM_FRACTION}"
echo " ⚠️ TeenEmoと同時起動の場合は serve_test.sh の GPU_UTIL=0.10 を確認してください"
echo ""

# Best-effort version banner; silenced when vLLM is not importable.
python -c "import vllm; print(f' vLLM バージョン : {vllm.__version__}')" 2>/dev/null || true
echo ""

# Build the server command as an array so every value stays quoted,
# then replace this shell with the server process.
serve_args=(
  --host "${LISTEN_HOST}"
  --port "${LISTEN_PORT}"
  --dtype "${PRECISION}"
  --gpu-memory-utilization "${MEM_FRACTION}"
  --max-model-len "${CTX_LEN}"
  --tensor-parallel-size 1
  --max-num-seqs "${BATCH_SEQS}"
  --language-model-only
  --enable-prefix-caching
  --trust-remote-code
)
exec vllm serve "${MODEL_ID}" "${serve_args[@]}"
|
eqbench-ja-run/serve_test.sh
ADDED
|
@@ -0,0 +1,44 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
#!/usr/bin/env bash
# =============================================================================
# serve_test.sh — launch the vLLM server for TeenEmo (the model under test)
#
# TeenEmo is published on HF as a LoRA adapter only (88.9MB) on top of
# LiquidAI/LFM2.5-1.2B-Base, so vLLM serves the base model and loads the
# adapter dynamically via --enable-lora / --lora-modules.
#
# Port : 8000
# VRAM : ~3GB (1.2B bf16; GPU_UTIL=0.10 leaves the rest for the judge server)
# =============================================================================

set -euo pipefail

# All knobs are overridable via environment variables (names are the
# script's public interface — do not rename them).
BASE_MODEL="${TEST_BASE_MODEL:-LiquidAI/LFM2.5-1.2B-Base}"
LORA_REPO="${TEST_LORA_REPO:-YUGOROU/TeenEmo-LFM2.5-1.2B-DPO}"
LORA_NAME="${TEST_LORA_NAME:-teenemo-dpo}"
PORT="${TEST_PORT:-8000}"
HOST="${VLLM_HOST:-0.0.0.0}"
GPU_UTIL="${TEST_GPU_UTIL:-0.10}"   # 1.2B is tiny; leave VRAM for the judge
MAX_MODEL_LEN="${TEST_MAX_MODEL_LEN:-4096}"
DTYPE="${VLLM_DTYPE:-auto}"
HF_TOKEN="${HF_TOKEN:-}"

echo "=== TeenEmo 受験者サーバー起動 ==="
echo " ベースモデル : ${BASE_MODEL}"
echo " LoRAアダプタ : ${LORA_REPO} (name=${LORA_NAME})"
echo " ポート : ${PORT}"
echo " GPU 使用率 : ${GPU_UTIL} (judge用にVRAMを確保)"
echo ""

# Build the command as an array instead of relying on the fragile unquoted
# `${HF_TOKEN:+--hf-token "..."}` expansion: the array keeps every value
# quoted and makes the conditional token argument explicit (SC2086-safe).
args=(
  --host "${HOST}"
  --port "${PORT}"
  --dtype "${DTYPE}"
  --gpu-memory-utilization "${GPU_UTIL}"
  --max-model-len "${MAX_MODEL_LEN}"
  --tensor-parallel-size 1
  --max-num-seqs 16
  --enable-prefix-caching
  --enable-lora
  --lora-modules "${LORA_NAME}=${LORA_REPO}"
  --trust-remote-code
)
if [[ -n "${HF_TOKEN}" ]]; then
  # NOTE(review): passing the token on argv is visible in `ps`; exporting
  # HF_TOKEN in the environment (read by huggingface_hub) avoids that —
  # kept as-is to preserve the original behavior, but consider switching.
  args+=(--hf-token "${HF_TOKEN}")
fi

# Replace this shell with the server process so signals reach vLLM directly.
exec vllm serve "${BASE_MODEL}" "${args[@]}"
|
eqbench-ja-run/setup_eqbench_run.sh
ADDED
|
@@ -0,0 +1,134 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
#!/usr/bin/env bash
# =============================================================================
# setup_eqbench_run.sh — set up the EQ-Bench3 (Japanese) evaluation run
#
# Steps:
#   1. Clone the EQ-Bench3 repository (scenario files swapped to the
#      translated versions when available)
#   2. Install Python dependencies (best-effort)
#   3. Generate the .env file pointing at the local vLLM servers
#   4. Fetch the serve scripts from HF
#   5. Print the run instructions
#
# Required env: HF_TOKEN — read access to the YUGOROU HF datasets.
# =============================================================================

set -euo pipefail

# Fail early with an actionable message instead of an opaque
# "HF_TOKEN: unbound variable" error at the first curl call under `set -u`.
: "${HF_TOKEN:?[setup] HF_TOKEN must be set (export HF_TOKEN=hf_xxxx)}"

WORKSPACE="/workspace/eqbench-run"
EQBENCH_REPO="https://github.com/EQ-bench/eqbench3.git"
HF_SCRIPTS="https://huggingface.co/datasets/YUGOROU/Test-2/resolve/main/eqbench-ja-run"

echo "[setup] EQ-Bench3 評価環境セットアップ"

# ── Dependencies ──────────────────────────────────────────────
echo "[setup] 依存関係のインストール..."
# Best-effort: failures tolerated (packages may already be installed).
pip install -q requests python-dotenv tqdm numpy scipy trueskill 2>/dev/null || true

# ── Clone EQ-Bench3 ───────────────────────────────────────────
mkdir -p "${WORKSPACE}"
if [ -d "${WORKSPACE}/eqbench3" ]; then
  echo "[setup] EQ-Bench3 既存リポジトリを更新中..."
  # A failed pull (local edits, detached head) is non-fatal on re-runs.
  git -C "${WORKSPACE}/eqbench3" pull --ff-only || true
else
  echo "[setup] EQ-Bench3 をクローン中..."
  git clone --depth=1 "${EQBENCH_REPO}" "${WORKSPACE}/eqbench3"
fi

# ── Download translated scenario files from HF (optional) ────
echo "[setup] 翻訳済みシナリオファイルのDL..."
PREF_DATASET="YUGOROU/teememo-eq-bench-ja"
for fname in scenario_prompts_ja.txt scenario_notes_ja.txt; do
  OUT="${WORKSPACE}/eqbench3/data/${fname}"
  if curl -fL -H "Authorization: Bearer ${HF_TOKEN}" \
       "https://huggingface.co/datasets/${PREF_DATASET}/resolve/main/data/${fname}" \
       -o "${OUT}" 2>/dev/null; then
    echo "[setup] ✅ ${fname} DL完了"
  else
    # Remove any partially written file so the `-f` existence checks below
    # cannot mistake a truncated download for a valid translation.
    rm -f -- "${OUT}"
    echo "[setup] ⚠️ ${fname} は未生成(先に translate_eqbench.py を実行してください)"
  fi
done

# ── Fetch the serve scripts ───────────────────────────────────
echo "[setup] serve スクリプトのDL..."
for script in serve_test.sh serve_judge.sh; do
  curl -fL -H "Authorization: Bearer ${HF_TOKEN}" \
    "${HF_SCRIPTS}/${script}" \
    -o "${WORKSPACE}/${script}"
  chmod +x "${WORKSPACE}/${script}"
  echo "[setup] ✅ ${script}"
done

# ── Generate the .env file ────────────────────────────────────
echo "[setup] .env ファイルを生成中..."
cat > "${WORKSPACE}/eqbench3/.env" << ENVEOF
# =============================================================================
# EQ-Bench3 ローカルvLLM設定(自動生成)
# 受験者: LFM2.5-1.2B-Base + TeenEmo LoRA (port 8000, lora name=teenemo-dpo)
# 採点者: Qwen3.5-35B-A3B (port 8001)
# =============================================================================

# 受験者: TeenEmo(ローカルvLLM port 8000)
# モデル名は --lora-modules で指定した name= と一致させる
TEST_API_URL=http://localhost:8000/v1/chat/completions
TEST_API_KEY=dummy

# 採点者: Qwen3.5-35B-A3B(ローカルvLLM port 8001)
JUDGE_API_URL=http://localhost:8001/v1/chat/completions
JUDGE_API_KEY=dummy

# API設定
MAX_RETRIES=3
RETRY_DELAY=5
REQUEST_TIMEOUT=300
ENVEOF
echo "[setup] ✅ .env 生成完了"

# ── Swap in the Japanese scenario files (when available) ─────
JA_PROMPTS="${WORKSPACE}/eqbench3/data/scenario_prompts_ja.txt"
EN_PROMPTS="${WORKSPACE}/eqbench3/data/scenario_prompts.txt"
if [ -f "${JA_PROMPTS}" ]; then
  # Back up the English original only once: on a re-run the live file is
  # already the Japanese version, and copying it again would destroy the
  # only remaining English copy.
  [ -f "${EN_PROMPTS}.bak" ] || cp "${EN_PROMPTS}" "${EN_PROMPTS}.bak"
  cp "${JA_PROMPTS}" "${EN_PROMPTS}"
  echo "[setup] ✅ scenario_prompts.txt を日本語版に差し替えました(英語版は .bak として保存)"
else
  echo "[setup] ⚠️ 日本語版シナリオが未生成のため英語版で動作します"
fi

JA_NOTES="${WORKSPACE}/eqbench3/data/scenario_notes_ja.txt"
EN_NOTES="${WORKSPACE}/eqbench3/data/scenario_notes.txt"
if [ -f "${JA_NOTES}" ]; then
  # Same once-only backup guard as for the prompts file above.
  [ -f "${EN_NOTES}.bak" ] || cp "${EN_NOTES}" "${EN_NOTES}.bak"
  cp "${JA_NOTES}" "${EN_NOTES}"
  echo "[setup] ✅ scenario_notes.txt を日本語版に差し替えました"
fi

echo ""
echo "[setup] =============================="
echo "[setup] セットアップ完了"
echo "[setup] 作業ディレクトリ: ${WORKSPACE}"
echo "[setup] =============================="
echo ""
echo "【実行手順】"
echo ""
echo "# Step 1: TeenEmo サーバー起動(port 8000)"
echo "tmux new-session -d -s eq_run"
echo "tmux new-window -t eq_run -n test"
echo "tmux send-keys -t eq_run:test \"cd ${WORKSPACE} && export HF_TOKEN='\$HF_TOKEN' && ./serve_test.sh\" Enter"
echo ""
echo "# Step 2: 採点者サーバー起動(port 8001)"
echo "tmux new-window -t eq_run -n judge"
echo "tmux send-keys -t eq_run:judge \"cd ${WORKSPACE} && ./serve_judge.sh\" Enter"
echo ""
echo "# Step 3: 両サーバーの起動を確認"
echo "tmux capture-pane -t eq_run:test -p | tail -3"
echo "tmux capture-pane -t eq_run:judge -p | tail -3"
echo ""
echo "# Step 4: EQ-Bench3 実行(loraモデル名はserve_test.shのLORA_NAMEと一致)"
echo "cd ${WORKSPACE}/eqbench3"
echo "python eqbench3.py \\"
echo "  --test-model teenemo-dpo \\"
echo "  --model-name TeenEmo-DPO \\"
echo "  --judge-model Qwen/Qwen3.5-35B-A3B \\"
echo "  --no-elo \\"
echo "  --save-interval 1 \\"
echo "  --iterations 1"
echo ""
echo "⚠️ OOMが発生した場合は順次実行モードを使用してください(README参照)"
|