#!/bin/bash
#
# PJM batch job: run the prompt-selection pipeline (shared prompts only)
# on one GPU of the b-batch resource group, 1 hour wall clock.
#
# NOTE(review): #PJM -o writes to logs/ — the logs/ directory must exist
# at submission time (mkdir -p logs before pjsub), or output is lost.
#PJM -L rscgrp=b-batch
#PJM -L gpu=1
#PJM -L elapse=1:00:00
#PJM -N ps_shared
#PJM -j
#PJM -o logs/ps_shared_%j.out

# Fail fast: abort on command errors, unset variables, and pipeline failures.
set -euo pipefail

# Toolchain expected by the Python environment below.
module load cuda/12.2.2
module load cudnn/8.9.7
module load gcc-toolset/12

# Activate the project virtualenv and enter the working tree.
source /home/hp250092/ku50001222/qian/aivc/lfj/stack_env/bin/activate
cd /home/hp250092/ku50001222/qian/aivc/lfj/transfer || exit 1

# Cap the CUDA caching-allocator split size to reduce fragmentation.
export PYTORCH_CUDA_ALLOC_CONF=max_split_size_mb:256
# HDF5 file locking is unreliable on some cluster filesystems; disable it.
export HDF5_USE_FILE_LOCKING=FALSE

# Job banner. ${VAR:-unknown} keeps 'set -u' from killing the job if the
# scheduler does not export these variables.
echo "=========================================="
echo "Job ID: ${PJM_JOBID:-unknown}"
echo "Job Name: ${PJM_JOBNAME:-unknown}"
echo "Start: $(date)"
echo "Node: $(hostname)"
echo "GPU: $(nvidia-smi --query-gpu=name,memory.total --format=csv,noheader 2>/dev/null || echo 'N/A')"
echo "=========================================="

# Main workload: prompt-selection pipeline restricted to shared prompts.
python code/prompt_selection/run_pipeline.py --shared-only

echo "=========================================="
echo "Finished: $(date)"
echo "=========================================="