#!/bin/bash
#
# Multi-model drift detection pipeline driver.
# Runs disentanglement_v2.py over a fixed set of chat models, one after
# another, then prints a per-model results table.
#
# Usage:    run_all.sh [--skip_qwen] [--skip_extract] [model_name]
# Requires: HF_TOKEN in the environment (needed for gated HF models).

set -euo pipefail

# SECURITY: never hardcode API tokens in a script (the previous revision
# committed a live Hugging Face token). Read the token from the caller's
# environment instead; warn but continue if absent, since non-gated models
# still download fine.
if [ -z "${HF_TOKEN:-}" ]; then
  echo "WARNING: HF_TOKEN is not set; gated models may fail to download." >&2
fi
export HF_TOKEN="${HF_TOKEN:-}"

BASE_DIR="$HOME/sv_kg/knowledge_drift"
DATA_DIR="$BASE_DIR/data"
SCRIPT="$BASE_DIR/disentanglement_v2.py"
GPU="${CUDA_VISIBLE_DEVICES:-0}"   # default to GPU 0 when caller sets nothing
|
|
| |
# ---------------------------------------------------------------------------
# Model registry: short key -> Hugging Face repo id.
# ---------------------------------------------------------------------------
declare -A MODEL_HF=(
  ["llama2"]="meta-llama/Llama-2-7b-chat-hf"
  ["mistral"]="mistralai/Mistral-7B-Instruct-v0.3"
  ["llama31"]="meta-llama/Llama-3.1-8B-Instruct"
  ["qwen25"]="Qwen/Qwen2.5-7B-Instruct"
  ["gemma2"]="google/gemma-2-9b-it"
)

# Order in which models are processed during a full sweep.
RUN_ORDER=("llama2" "mistral" "llama31" "qwen25" "gemma2")

# Runtime flags, toggled by command-line arguments.
SKIP_QWEN=false        # --skip_qwen    : leave qwen25 out of the sweep
SKIP_EXTRACTION=false  # --skip_extract : force --skip_extraction on every run
SINGLE_MODEL=""        # bare model key : run only that model
|
|
# Parse command-line arguments. Flags may appear in any order; a bare
# model key selects single-model mode. Anything else aborts with usage.
for cli_arg in "$@"; do
  case "$cli_arg" in
    --skip_qwen)
      SKIP_QWEN=true
      ;;
    --skip_extract)
      SKIP_EXTRACTION=true
      ;;
    llama2|mistral|llama31|qwen25|gemma2)
      SINGLE_MODEL="$cli_arg"
      ;;
    *)
      echo "Unknown arg: $cli_arg"
      echo "Usage: $0 [--skip_qwen] [--skip_extract] [model_name]"
      exit 1
      ;;
  esac
done
|
|
| |
| |
| |
# ---------------------------------------------------------------------------
# Preflight: print the run configuration, then verify that each model's
# per-model dataset file exists (reporting its sample count when it does).
# ---------------------------------------------------------------------------
echo "============================================================"
echo " MULTI-MODEL DRIFT DETECTION PIPELINE"
echo "============================================================"
echo " Base dir:  $BASE_DIR"
echo " GPU:       $GPU"
echo " Script:    $SCRIPT"
echo " Skip Qwen: $SKIP_QWEN"
echo ""

MISSING=0
for key in "${RUN_ORDER[@]}"; do
  ds="$DATA_DIR/tier1_${key}.json"
  if [ ! -f "$ds" ]; then
    echo "  ❌ Missing dataset: $ds"
    MISSING=$((MISSING + 1))
  else
    # Tolerate both {"samples": [...]} and bare-list layouts; print "?" when
    # the file cannot be parsed rather than aborting the preflight.
    n=$(python3 -c "import json; d=json.load(open('$ds')); s=d.get('samples',d); print(len(s))" 2>/dev/null || echo "?")
    echo "  ✅ $ds ($n samples)"
  fi
done

if [ "$MISSING" -gt 0 ]; then
  echo ""
  echo " Missing $MISSING datasets. Run prepare_all_models.py first:"
  echo "   python prepare_all_models.py --master_dataset data/tier1.json"
  echo ""
  # NOTE(review): execution deliberately(?) continues here — run_model()
  # skips each missing dataset individually. Confirm an 'exit 1' was not
  # intended after this warning (a line appears to be lost in the original).
fi

echo "============================================================"
echo ""
|
|
| |
| |
| |
#######################################
# Run the drift-detection script for a single model.
# Globals:   MODEL_HF, DATA_DIR, SCRIPT, GPU, SKIP_EXTRACTION (read)
# Arguments: $1 - model key (index into MODEL_HF)
# Outputs:   progress and timing lines to stdout
# Returns:   1 when the model's dataset file is missing; otherwise the
#            exit status of the python run.
#######################################
run_model() {
  local key=$1
  local hf_path="${MODEL_HF[$key]}"
  local dataset="$DATA_DIR/tier1_${key}.json"
  local output_dir="$DATA_DIR/experiments/tier1_${key}_v2"
  local cache="$output_dir/cached_states.npz"

  echo ""
  echo "────────────────────────────────────────────────────────────"
  echo "  MODEL:   $hf_path"
  echo "  Dataset: $dataset"
  echo "  Output:  $output_dir"
  echo "────────────────────────────────────────────────────────────"
  echo ""

  if [ ! -f "$dataset" ]; then
    echo "  ⚠️  Dataset not found: $dataset — SKIPPING"
    return 1
  fi

  # Build the command as an argv array rather than a string fed to 'eval':
  # array expansion keeps each argument intact even if a path contains
  # spaces, and removes the code-injection hazard of eval'ing interpolated
  # strings.
  local -a cmd=(
    python "$SCRIPT"
    --model "$hf_path"
    --dataset "$dataset"
    --output_dir "$output_dir"
  )

  # Reuse cached hidden states when present, or when explicitly requested.
  if [ -f "$cache" ] || [ "$SKIP_EXTRACTION" = true ]; then
    if [ -f "$cache" ]; then
      echo "  📦 Cache found: $cache — skipping extraction"
    fi
    cmd+=(--skip_extraction)
  fi

  echo "  ▶ Running: CUDA_VISIBLE_DEVICES=$GPU ${cmd[*]}"
  echo "  ▶ Started: $(date)"
  local START=$SECONDS

  # Scope the GPU selection to this command only instead of mutating the
  # script's environment.
  CUDA_VISIBLE_DEVICES="$GPU" "${cmd[@]}"

  local ELAPSED=$((SECONDS - START))
  local MINS=$((ELAPSED / 60))
  local SECS=$((ELAPSED % 60))
  echo ""
  echo "  ✅ $key complete in ${MINS}m ${SECS}s"
  echo "  📊 Results: $output_dir/final_results.json"
  echo ""
}
|
|
| |
| |
| |
# ---------------------------------------------------------------------------
# Main dispatch: run a single named model, or the full sweep in RUN_ORDER.
# Each model's failure is counted, never fatal to the sweep.
# ---------------------------------------------------------------------------
TOTAL_START=$SECONDS
COMPLETED=0
FAILED=0

if [ -n "$SINGLE_MODEL" ]; then
  # Single-model mode. Explicit if/else rather than 'cmd && a || b', which
  # would also run the failure arm if the success arm itself failed.
  if run_model "$SINGLE_MODEL"; then
    COMPLETED=$((COMPLETED + 1))
  else
    FAILED=$((FAILED + 1))
  fi
else
  # Full sweep.
  for key in "${RUN_ORDER[@]}"; do
    if [ "$SKIP_QWEN" = true ] && [ "$key" = "qwen25" ]; then
      echo "  ⏭ Skipping Qwen2.5 (already done)"
      continue
    fi
    if run_model "$key"; then
      COMPLETED=$((COMPLETED + 1))
    else
      FAILED=$((FAILED + 1))
    fi
  done
fi
|
|
# Final wall-clock summary for the whole run. (The box-border glyphs in the
# original were mojibake; reconstructed as plain box-drawing rules.)
TOTAL_ELAPSED=$((SECONDS - TOTAL_START))
TOTAL_MINS=$((TOTAL_ELAPSED / 60))
TOTAL_HOURS=$((TOTAL_MINS / 60))
REM_MINS=$((TOTAL_MINS % 60))

echo ""
echo "────────────────────────────────────────────────────────────"
echo "  ALL DONE"
echo "  Completed: $COMPLETED | Failed: $FAILED"
echo "  Total time: ${TOTAL_HOURS}h ${REM_MINS}m"
echo "────────────────────────────────────────────────────────────"
echo ""
|
|
| |
| |
| |
# ---------------------------------------------------------------------------
# Results table: one row per model, parsed from final_results.json.
# ---------------------------------------------------------------------------
echo "RESULTS SUMMARY:"
echo "─────────────────────────────────────────────────────────"
printf "  %-25s %-8s %-8s %-10s %-10s\n" "Model" "Drift" "Unc" "cos(d,u)" "Best Layer"
echo "─────────────────────────────────────────────────────────"

for key in "${RUN_ORDER[@]}"; do
  rf="$DATA_DIR/experiments/tier1_${key}_v2/final_results.json"
  if [ -f "$rf" ]; then
    # BUG FIX: the original embedded ${MODEL_HF[$key]:<25s} inside the
    # python source, intending a python f-string format spec — but bash
    # parses ':<25s' as an (invalid) substring expansion and errors out
    # before python ever runs. Pass the path and model name via argv and
    # keep all formatting on the python side of a quoted heredoc.
    python3 - "$rf" "${MODEL_HF[$key]}" <<'PY' 2>/dev/null || echo "  ${MODEL_HF[$key]}: (parse error)"
import json, sys

path, name = sys.argv[1], sys.argv[2]
with open(path) as f:
    r = json.load(f)
bl = r['best_layer']
lr = r['layer_results'][str(bl)]
print(f"  {name:<25s} {lr['drift_auroc']:.4f} {lr['uncertainty_auroc']:.4f} "
      f"{lr['cos_drift_uncertainty']:+.4f} {bl}")
PY
  else
    echo "  ${MODEL_HF[$key]}: (no results)"
  fi
done
echo "─────────────────────────────────────────────────────────"