feat: PR#11 - EQ-Bench3 macOS evaluation setup (uv + llama-cpp + HF Inference Providers)
#11
by YUGOROU - opened
- eqbench-mac/setup_eqbench_mac.sh +55 -147
eqbench-mac/setup_eqbench_mac.sh
CHANGED
@@ -1,191 +1,99 @@
Old version (lines removed by this PR are prefixed with -; truncated fragments left as extracted):

 #!/usr/bin/env bash
-#
-#
-#
-# Configuration:
-# Test model: YUGOROU/TeenEmo-LFM2.5-1.2B-GGUF (llama-cpp, Metal acceleration)
-# Judge: openai/gpt-oss-120b via HF Inference Providers (novita)
-#
-# Prerequisites:
-# - uv installed (curl -LsSf https://astral.sh/uv/install.sh | sh)
-# - Homebrew installed
-# - HF_TOKEN: HuggingFace Pro plan token
-#
-# Usage:
-#   export HF_TOKEN="hf_xxxx"
-#   bash setup_eqbench_mac.sh
-# =============================================================================

 set -euo pipefail

-HF_TOKEN="${HF_TOKEN:?HF_TOKEN is not set.
 WORKSPACE="${HOME}/eqbench-teenemo"
-EQBENCH_REPO="https://github.com/EQ-bench/eqbench3.git"
 VENV_DIR="${WORKSPACE}/.venv"

 echo "======================================"
 echo " EQ-Bench3 macOS setup"
 echo " Working directory: ${WORKSPACE}"
 echo "======================================"
-echo ""

-# ── 1. Install llama-cpp (Homebrew) ──────────────────────
-echo "[1/6] Checking for llama-cpp..."
-if ! command -v llama-server &>/dev/null; then
-  echo " Installing llama-cpp (Metal support)..."
-  brew install llama.cpp
-  echo " ✅ llama-cpp installed"
-else
-  echo " ✅ llama-cpp found: $(llama-server --version 2>&1 | head -1)"
-fi

-echo ""
-echo "[2/6] Creating directories..."
-mkdir -p "${WORKSPACE}" "${GGUF_DIR}"

-#
-echo ""
-echo "[3/6] Cloning EQ-Bench3..."
 if [ -d "${WORKSPACE}/eqbench3" ]; then
-  git -C "${WORKSPACE}/eqbench3" pull --ff-only 2>/dev/null || echo " (update skipped)"
 else
-  git clone --depth=1
 fi

-#
-echo ""
-echo "[4/6] Setting up uv virtual environment..."
 cd "${WORKSPACE}/eqbench3"

-if [ ! -d "${VENV_DIR}" ]; then
-  uv venv "${VENV_DIR}" --python 3.11
-  echo " ✅ Virtual environment created: ${VENV_DIR}"
-fi

-# Install dependencies from requirements.txt
 uv pip install --python "${VENV_DIR}/bin/python" \
-  -r requirements.txt

-  echo " Downloading: ${GGUF_REPO}/${GGUF_FILE}"
-  "${VENV_DIR}/bin/python" -c "
-from huggingface_hub import hf_hub_download
-import os
-path = hf_hub_download(
-    repo_id='${GGUF_REPO}',
-    filename='${GGUF_FILE}',
-    repo_type='model',
-    token='${HF_TOKEN}',
-    local_dir='${GGUF_DIR}',
 )
-fi

-#
-# Test model: TeenEmo-LFM2.5-1.2B-GGUF (llama-server port ${LLAMA_SERVER_PORT})
-# Judge: openai/gpt-oss-120b via HF Inference Providers (novita)
-#   Input: \$0.05/1M tokens, Output: \$0.25/1M tokens
-#   Estimated cost for 46 scenarios: ~\$0.024 (within the Pro \$2 credit)
-# =============================================================================

-#
 TEST_API_KEY=dummy
-# Judge: HF Inference Providers → novita → gpt-oss-120b
-# HF Pro token: covered by the \$2/month free credit
 JUDGE_API_URL=https://router.huggingface.co/novita/v1/chat/completions
 JUDGE_API_KEY=${HF_TOKEN}
-# API settings
 MAX_RETRIES=6
 RETRY_DELAY=5
 REQUEST_TIMEOUT=300
 ENVEOF

-echo " ✅ .env generated: ${ENV_FILE}"

-# ── Swap in the Japanese scenarios ────────────────────────────────
-echo ""
-echo "[Optional] Swapping in the Japanese scenarios..."
-JA_PROMPTS_URL="https://huggingface.co/datasets/YUGOROU/teememo-eq-bench-ja/resolve/main/data/scenario_prompts_ja.txt"
-JA_NOTES_URL="https://huggingface.co/datasets/YUGOROU/teememo-eq-bench-ja/resolve/main/data/scenario_notes_ja.txt"

-for pair in \
-  "${JA_PROMPTS_URL}|${WORKSPACE}/eqbench3/data/scenario_prompts.txt" \
-  "${JA_NOTES_URL}|${WORKSPACE}/eqbench3/data/scenario_notes.txt"; do
-  URL="${pair%%|*}"
-  DEST="${pair##*|}"
-  BACKUP="${DEST}.en.bak"
-  if [ ! -f "${BACKUP}" ]; then
-    cp "${DEST}" "${BACKUP}"
-  fi
-  if curl -fL -H "Authorization: Bearer ${HF_TOKEN}" \
-       "${URL}" -o "${DEST}" 2>/dev/null; then
-    echo " ✅ Swapped in Japanese version: $(basename ${DEST})"
-  else
-    echo " ⚠️ Japanese version not generated yet; using English: $(basename ${DEST})"
-    cp "${BACKUP}" "${DEST}"
-  fi
-done

-# ── Setup complete ────────────────────────────────────────────
 echo ""
 echo "======================================"
 echo " Setup complete"
-echo " Working directory: ${WORKSPACE}"
 echo "======================================"
 echo ""
 echo "[How to run]"
 echo ""
-echo "# Step 1: in another tab
 echo ""
 echo "# Step 2: check that the server is up"
-echo "curl -s http://localhost:${
 echo ""
 echo "# Step 3: run the EQ-Bench3 evaluation"
-echo "cd ${WORKSPACE}/eqbench3"
-echo "source ${VENV_DIR}/bin/activate"
 echo "python eqbench3.py \\"
-echo "  --test-model LFM2.5-1.2B-Base
 echo "  --model-name TeenEmo-DPO \\"
 echo "  --judge-model openai/gpt-oss-120b \\"
-echo "  --no-elo
-echo "  --save-interval 1 \\"
-echo "  --iterations 1"
-echo ""
-echo "# Check the results"
-echo "cat ${WORKSPACE}/eqbench3/eqbench3_runs.json | python3 -c \\"
-echo "  \"import json,sys; [print(k, v.get('eq_bench_score','N/A')) for k,v in json.load(sys.stdin).items() if 'TeenEmo' in k]\""
New version (lines added by this PR are prefixed with +):

 #!/usr/bin/env bash
+# EQ-Bench3 setup for macOS / M3 MacBook
+# Test model: TeenEmo MLX (mlx_lm.server, port 8000)
+# Judge: openai/gpt-oss-120b via HF Inference Providers / novita

 set -euo pipefail

+HF_TOKEN="${HF_TOKEN:?HF_TOKEN is not set.}"
 WORKSPACE="${HOME}/eqbench-teenemo"
 VENV_DIR="${WORKSPACE}/.venv"
+PORT="${LLAMA_SERVER_PORT:-8000}"
+# mlx-community 6-bit quantized model (MLX-native, small download, no conversion needed)
+BASE_MODEL="LiquidAI/LFM2.5-1.2B-Base"
+LORA_REPO="YUGOROU/TeenEmo-LFM2.5-1.2B-DPO"
+LORA_LOCAL="${WORKSPACE}/adapters/teenemo-dpo"

 echo "======================================"
 echo " EQ-Bench3 macOS setup"
 echo " Working directory: ${WORKSPACE}"
 echo "======================================"

+mkdir -p "${WORKSPACE}"

+# Clone EQ-Bench3
 if [ -d "${WORKSPACE}/eqbench3" ]; then
+  git -C "${WORKSPACE}/eqbench3" pull --ff-only 2>/dev/null || true
 else
+  git clone --depth=1 https://github.com/EQ-bench/eqbench3.git "${WORKSPACE}/eqbench3"
 fi
+echo "✅ EQ-Bench3"

+# uv virtual environment + dependencies
 cd "${WORKSPACE}/eqbench3"
+uv venv "${VENV_DIR}" --python 3.11 2>/dev/null || true
 uv pip install --python "${VENV_DIR}/bin/python" \
+  -r requirements.txt mlx-lm huggingface_hub
+echo "✅ Dependencies installed"
+
+# Download the LoRA adapter locally
+# (mlx_lm.server does not accept an HF repo ID directly, so a local path is needed)
+echo "Downloading LoRA adapter: ${LORA_REPO} → ${LORA_LOCAL}"
+"${VENV_DIR}/bin/python" - << PYEOF
+from huggingface_hub import snapshot_download
+path = snapshot_download(
+    repo_id="${LORA_REPO}",
+    repo_type="model",
+    token="${HF_TOKEN}",
+    local_dir="${LORA_LOCAL}",
 )
+print(f"✅ LoRA adapter: {path}")
+PYEOF

+# Swap in the Japanese scenarios
+for pair in \
+  "scenario_prompts_ja.txt|data/scenario_prompts.txt" \
+  "scenario_notes_ja.txt|data/scenario_notes.txt"; do
+  SRC="${pair%%|*}"; DEST="${pair##*|}"
+  cp "${DEST}" "${DEST}.en.bak" 2>/dev/null || true
+  curl -sfL -H "Authorization: Bearer ${HF_TOKEN}" \
+    "https://huggingface.co/datasets/YUGOROU/teememo-eq-bench-ja/resolve/main/data/${SRC}" \
+    -o "${DEST}" && echo "✅ Japanese version: ${DEST}" || echo "⚠️ Using English version: ${DEST}"
+done

+# Generate .env
+cat > "${WORKSPACE}/eqbench3/.env" << ENVEOF
+TEST_API_URL=http://localhost:${PORT}/v1/chat/completions
 TEST_API_KEY=dummy
 JUDGE_API_URL=https://router.huggingface.co/novita/v1/chat/completions
 JUDGE_API_KEY=${HF_TOKEN}
 MAX_RETRIES=6
 RETRY_DELAY=5
 REQUEST_TIMEOUT=300
 ENVEOF
+echo "✅ .env generated"

 echo ""
 echo "======================================"
 echo " Setup complete"
 echo "======================================"
 echo ""
 echo "[How to run]"
 echo ""
+echo "# Step 1: start the MLX server in another tab"
+echo "source ${VENV_DIR}/bin/activate"
+echo "mlx_lm.server \\"
+echo "  --model ${BASE_MODEL} \\"
+echo "  --adapter-path ${LORA_LOCAL} \\"
+echo "  --port ${PORT}"
 echo ""
 echo "# Step 2: check that the server is up"
+echo "curl -s http://localhost:${PORT}/v1/models"
 echo ""
 echo "# Step 3: run the EQ-Bench3 evaluation"
+echo "cd ${WORKSPACE}/eqbench3 && source ${VENV_DIR}/bin/activate"
 echo "python eqbench3.py \\"
+echo "  --test-model LFM2.5-1.2B-Base \\"
 echo "  --model-name TeenEmo-DPO \\"
 echo "  --judge-model openai/gpt-oss-120b \\"
+echo "  --no-elo --save-interval 1 --iterations 1"
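The judge side can be checked the same way before the benchmark spends any credits. A sketch using the router URL and judge model written into .env above; it only confirms that HF_TOKEN is accepted and that openai/gpt-oss-120b is reachable through the novita provider (the call itself consumes a few tokens of the Pro credit):

# Hypothetical one-off request to the judge endpoint
curl -s https://router.huggingface.co/novita/v1/chat/completions \
  -H "Authorization: Bearer ${HF_TOKEN}" \
  -H "Content-Type: application/json" \
  -d '{"model": "openai/gpt-oss-120b", "messages": [{"role": "user", "content": "ping"}], "max_tokens": 8}'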
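The old version of the script ended by printing the score from eqbench3_runs.json; the new version drops that hint, but the same snippet still applies, assuming the run file keeps its layout of run keys mapping to objects with an eq_bench_score field:

# Print the EQ-Bench score for TeenEmo runs (layout assumed from the previous script)
cat ~/eqbench-teenemo/eqbench3/eqbench3_runs.json | python3 -c \
  "import json,sys; [print(k, v.get('eq_bench_score','N/A')) for k,v in json.load(sys.stdin).items() if 'TeenEmo' in k]"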