#!/bin/bash
# Script to pre-download the Llama model on a login node (which has internet access)
# Run this BEFORE submitting the Slurm job

set -euo pipefail

MODEL_NAME="${1:-meta-llama/Llama-3.2-3B-Instruct}"
HF_HOME="${HF_HOME:-/scratch/user/hangxiao/huggingface_cache}"

echo "Downloading model: ${MODEL_NAME}"
echo "Cache directory: ${HF_HOME}"

export HF_HOME
export TRANSFORMERS_CACHE="${HF_HOME}"
export HF_TOKEN="${HF_TOKEN:-}"

mkdir -p "${HF_HOME}"

source /sw/eb/sw/Anaconda3/2024.02-1/etc/profile.d/conda.sh
conda activate /scratch/user/hangxiao/.conda/envs/paper_impact

# Use huggingface-cli to download the model (simpler and faster)
if command -v huggingface-cli &> /dev/null; then
    echo "Using huggingface-cli to download model..."
    huggingface-cli download "${MODEL_NAME}" \
        --cache-dir "${HF_HOME}" \
        --token "${HF_TOKEN:-}" \
        --local-dir-use-symlinks False
else
    # Fallback: Use Python to download via transformers
    echo "huggingface-cli not found, using transformers library..."
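    # A minimal sketch of the Python fallback, assuming huggingface_hub (a
    # transformers dependency) is installed in the activated environment and
    # exposes snapshot_download; adjust if the environment differs.
    python <<PYEOF
import os
from huggingface_hub import snapshot_download

# MODEL_NAME and HF_HOME are expanded by the shell before Python runs;
# HF_TOKEN is read from the exported environment variable.
snapshot_download(
    repo_id="${MODEL_NAME}",
    cache_dir="${HF_HOME}",
    token=os.environ.get("HF_TOKEN") or None,  # None falls back to anonymous access
)
print("Download complete: ${MODEL_NAME}")
PYEOF
fi

echo "Done. Model cached in ${HF_HOME}"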