#!/bin/bash
#PJM -L rscgrp=b-batch
#PJM -L gpu=1
#PJM -L elapse=3:00:00
#PJM -N prompt_selection
#PJM -j
#PJM -o logs/prompt_selection_%j.out
#
# Batch job: run the prompt-selection pipeline on a single GPU.
# NOTE(review): the logs/ directory must exist at submission time or PJM
# cannot create the -o output file — run `mkdir -p logs` before pjsub.

# Abort on any command failure, unset variable, or failed pipeline stage,
# so a broken environment setup cannot silently fall through to the run.
set -euo pipefail

# Toolchain required by the pipeline.
module load cuda/12.2.2
module load cudnn/8.9.7
module load gcc-toolset/12

# Project virtualenv and working directory.
source /home/hp250092/ku50001222/qian/aivc/lfj/stack_env/bin/activate
cd /home/hp250092/ku50001222/qian/aivc/lfj/transfer || exit 1

# Cap the CUDA caching allocator's split size to reduce fragmentation OOMs.
export PYTORCH_CUDA_ALLOC_CONF=max_split_size_mb:256

echo "=========================================="
# PJM_JOBID/PJM_JOBNAME are set by the scheduler; default them so the
# banner cannot abort the job under `set -u` (e.g. when run interactively).
echo "Job ID: ${PJM_JOBID:-unknown}"
echo "Job Name: ${PJM_JOBNAME:-unknown}"
echo "Start: $(date)"
echo "Node: $(hostname)"
# Best-effort GPU probe; intentionally tolerates a missing nvidia-smi.
echo "GPU: $(nvidia-smi --query-gpu=name,memory.total --format=csv,noheader 2>/dev/null || echo 'N/A')"
echo "=========================================="

python code/prompt_selection/run_pipeline.py

echo "=========================================="
echo "Finished: $(date)"
echo "=========================================="