# knowledge-drift-experiments / run_all_models.sh
# Uploaded by Raniahossam33 via huggingface_hub (commit 14b2318, verified).
# NOTE: the lines above are web-page metadata that was captured together with
# the file; they are commented out so they no longer break shell execution.
#!/bin/bash
# ============================================================================
# Run Drift Disentanglement on All 5 Models
# ============================================================================
# Usage:
#   chmod +x run_all_models.sh
#   ./run_all_models.sh              # Run all models
#   ./run_all_models.sh llama2       # Run single model
#   ./run_all_models.sh --skip_qwen  # Skip Qwen (already done)
#
# For background execution:
#   tmux new-session -d -s models './run_all_models.sh --skip_qwen 2>&1 | tee run_all.log'
#   tmux attach -t models            # Check progress
#
# Required env: HF_TOKEN (Hugging Face access token for gated models)
# ============================================================================
set -e

# SECURITY FIX: the token was previously hardcoded here. A committed token is
# compromised and must be revoked; read it from the environment instead.
if [ -n "${HF_TOKEN:-}" ]; then
  export HF_TOKEN
else
  echo "⚠️  HF_TOKEN is not set — gated models (LLaMA, Gemma) may fail to download" >&2
fi

# Project layout. BUG FIX: the path literal was split across two lines by a
# wrap artifact, which embedded a newline inside BASE_DIR.
BASE_DIR="$HOME/sv_kg/knowledge_drift"
DATA_DIR="$BASE_DIR/data"
SCRIPT="$BASE_DIR/disentanglement_v2.py"
# GPU to pin the run to; honors an externally set CUDA_VISIBLE_DEVICES.
GPU="${CUDA_VISIBLE_DEVICES:-0}"
# Model registry: short key -> Hugging Face repo id. The per-model dataset
# and output paths are derived from the key inside run_model().
declare -A MODEL_HF
MODEL_HF["llama2"]="meta-llama/Llama-2-7b-chat-hf"
MODEL_HF["mistral"]="mistralai/Mistral-7B-Instruct-v0.3"
MODEL_HF["llama31"]="meta-llama/Llama-3.1-8B-Instruct"
MODEL_HF["qwen25"]="Qwen/Qwen2.5-7B-Instruct"
MODEL_HF["gemma2"]="google/gemma-2-9b-it"

# Execution order: largest (most drifted) dataset first.
# LLaMA-2 ~5265 drifted, Mistral/LLaMA-3.1/Qwen ~2313, Gemma ~1178.
RUN_ORDER=("llama2" "mistral" "llama31" "qwen25" "gemma2")
# ============================================================================
# Parse arguments
# ============================================================================
# Flags default to "run everything"; a bare model key restricts the run to
# that single model (validated against the known keys).
SKIP_QWEN=false
SKIP_EXTRACTION=false
SINGLE_MODEL=""
for arg in "$@"; do
  case "$arg" in
    --skip_qwen) SKIP_QWEN=true ;;
    --skip_extract) SKIP_EXTRACTION=true ;;
    llama2|mistral|llama31|qwen25|gemma2)
      SINGLE_MODEL="$arg" ;;
    *)
      # Diagnostics belong on stderr so they survive stdout redirection
      # (e.g. the tee pipeline suggested in the header).
      echo "Unknown arg: $arg" >&2
      echo "Usage: $0 [--skip_qwen] [--skip_extract] [model_name]" >&2
      exit 1 ;;
  esac
done
# ============================================================================
# Preflight checks
# ============================================================================
echo "============================================================"
echo " MULTI-MODEL DRIFT DETECTION PIPELINE"
echo "============================================================"
echo " Base dir: $BASE_DIR"
echo " GPU: $GPU"
echo " Script: $SCRIPT"
echo " Skip Qwen: $SKIP_QWEN"
echo ""
# Verify every per-model dataset exists and report its sample count, so the
# user can abort early if something is missing.
MISSING=0
for key in "${RUN_ORDER[@]}"; do
  ds="$DATA_DIR/tier1_${key}.json"
  if [ ! -f "$ds" ]; then
    echo " ❌ Missing dataset: $ds"
    MISSING=$((MISSING + 1))
  else
    # Pass the path via argv rather than interpolating it into the Python
    # source — a path containing a quote would otherwise break the snippet.
    n=$(python3 -c "import json,sys; d=json.load(open(sys.argv[1])); s=d.get('samples',d); print(len(s))" "$ds" 2>/dev/null || echo "?")
    echo " ✅ $ds ($n samples)"
  fi
done
if [ "$MISSING" -gt 0 ]; then
  echo ""
  echo " Missing $MISSING datasets. Run prepare_all_models.py first:"
  echo "   python prepare_all_models.py --master_dataset data/tier1.json"
  echo ""
  # Deliberately NOT exiting — the user may only want to run the datasets
  # that are present.
fi
echo "============================================================"
echo ""
# ============================================================================
# Run function
# ============================================================================
#######################################
# Run disentanglement_v2.py for a single model.
# Globals:   MODEL_HF, DATA_DIR, SCRIPT, GPU, SKIP_EXTRACTION (read)
# Arguments: $1 - model key (llama2|mistral|llama31|qwen25|gemma2)
# Outputs:   progress banners on stdout
# Returns:   0 on success, 1 if the dataset is missing, otherwise the
#            Python script's exit status
#######################################
run_model() {
  local key=$1
  local hf_path="${MODEL_HF[$key]}"
  local dataset="$DATA_DIR/tier1_${key}.json"
  local output_dir="$DATA_DIR/experiments/tier1_${key}_v2"
  local cache="$output_dir/cached_states.npz"

  echo ""
  echo "╔══════════════════════════════════════════════════════════╗"
  echo "║ MODEL: $hf_path"
  echo "║ Dataset: $dataset"
  echo "║ Output: $output_dir"
  echo "╚══════════════════════════════════════════════════════════╝"
  echo ""

  if [ ! -f "$dataset" ]; then
    echo " ⚠️ Dataset not found: $dataset — SKIPPING"
    return 1
  fi

  # BUG FIX: build the command as an array instead of a string + eval. The
  # eval form word-split every path and was vulnerable to shell injection if
  # any path contained spaces or metacharacters.
  local -a cmd=(python "$SCRIPT"
    --model "$hf_path"
    --dataset "$dataset"
    --output_dir "$output_dir")

  # Skip the expensive hidden-state extraction when a cache already exists
  # or the caller passed --skip_extract.
  if [ -f "$cache" ] || [ "$SKIP_EXTRACTION" = true ]; then
    if [ -f "$cache" ]; then
      echo " 📦 Cache found: $cache — skipping extraction"
    fi
    cmd+=(--skip_extraction)
  fi

  echo " ▶ Running: CUDA_VISIBLE_DEVICES=$GPU ${cmd[*]}"
  echo " ▶ Started: $(date)"
  local start=$SECONDS

  # Per-command env assignment pins the GPU without mutating our own env.
  CUDA_VISIBLE_DEVICES="$GPU" "${cmd[@]}"

  local elapsed=$((SECONDS - start))
  echo ""
  echo " ✅ $key complete in $((elapsed / 60))m $((elapsed % 60))s"
  echo " 📊 Results: $output_dir/final_results.json"
  echo ""
}
# ============================================================================
# Execute
# ============================================================================
TOTAL_START=$SECONDS
COMPLETED=0
FAILED=0

if [ -n "$SINGLE_MODEL" ]; then
  # A single model key was given on the command line.
  if run_model "$SINGLE_MODEL"; then
    COMPLETED=$((COMPLETED + 1))
  else
    FAILED=$((FAILED + 1))
  fi
else
  # Full sweep in RUN_ORDER, optionally skipping Qwen2.5.
  for key in "${RUN_ORDER[@]}"; do
    if [ "$SKIP_QWEN" = true ] && [ "$key" = "qwen25" ]; then
      echo " ⏭ Skipping Qwen2.5 (already done)"
      continue
    fi
    # Explicit if/else (instead of `run_model && A || B`) keeps a failing
    # model from aborting the whole sweep under `set -e` and makes the
    # counter updates unambiguous.
    if run_model "$key"; then
      COMPLETED=$((COMPLETED + 1))
    else
      FAILED=$((FAILED + 1))
    fi
  done
fi

TOTAL_ELAPSED=$((SECONDS - TOTAL_START))
TOTAL_MINS=$((TOTAL_ELAPSED / 60))
TOTAL_HOURS=$((TOTAL_MINS / 60))
REM_MINS=$((TOTAL_MINS % 60))
echo ""
# ENCODING FIX: the banner's box-drawing characters were mojibake (UTF-8
# bytes mis-decoded as Latin-1); restored to proper ║/╚ characters.
echo "╔══════════════════════════════════════════════════════════╗"
echo "║ ALL DONE"
echo "║ Completed: $COMPLETED | Failed: $FAILED"
echo "║ Total time: ${TOTAL_HOURS}h ${REM_MINS}m"
echo "╚══════════════════════════════════════════════════════════╝"
echo ""
# ============================================================================
# Post-run: Collect results summary
# ============================================================================
#######################################
# Print a per-model table of best-layer metrics from final_results.json.
# Globals:   RUN_ORDER, MODEL_HF, DATA_DIR (read)
# Outputs:   summary table on stdout
#######################################
print_summary() {
  local key rf model
  echo "RESULTS SUMMARY:"
  echo "─────────────────────────────────────────────────────────"
  printf " %-25s %-8s %-8s %-10s %-10s\n" "Model" "Drift" "Unc" "cos(d,u)" "Best Layer"
  echo "─────────────────────────────────────────────────────────"
  for key in "${RUN_ORDER[@]}"; do
    rf="$DATA_DIR/experiments/tier1_${key}_v2/final_results.json"
    model="${MODEL_HF[$key]}"
    if [ -f "$rf" ]; then
      # BUG FIX: the old inline snippet embedded ${MODEL_HF[$key]:<25s} in a
      # double-quoted string. Bash parsed ":<25s" as an (invalid) substring
      # offset — "syntax error: operand expected" at runtime — so the Python
      # format spec never reached Python. Pass the path and model name via
      # argv and do all formatting inside Python instead.
      python3 - "$rf" "$model" <<'PYEOF' 2>/dev/null || echo " $model: (parse error)"
import json, sys
path, model = sys.argv[1], sys.argv[2]
with open(path) as f:
    r = json.load(f)
bl = r['best_layer']
lr = r['layer_results'][str(bl)]
print(f" {model:<25s} {lr['drift_auroc']:.4f}   {lr['uncertainty_auroc']:.4f}   {lr['cos_drift_uncertainty']:+.4f}    {bl}")
PYEOF
    else
      echo " $model: (no results)"
    fi
  done
  echo "─────────────────────────────────────────────────────────"
}
print_summary