#!/usr/bin/env bash
# Fine-tuning launch script: runs src.testLlama via `accelerate launch` for
# each adapter variant listed in TEXT, using the commonsense config.
# Requires: accelerate, the src.testLlama module, ./config/commonsense.yaml.
set -euo pipefail

export OMINI_CONFIG=./config/commonsense.yaml
# echo $OMINI_CONFIG
export TOKENIZERS_PARALLELISM=true

# CUDA headers (cuda.h) live in the conda env's include dir; expose them to
# the C/C++ compilers. The ${VAR:+...} form avoids a spurious leading ':'
# (which compilers treat as "current directory") when the variable is unset,
# and keeps `set -u` happy.
CUDA_INCLUDE_PATH="/home/work/miniconda3/envs/allm/include"
export CPATH="${CPATH:+$CPATH:}$CUDA_INCLUDE_PATH"
export CPLUS_INCLUDE_PATH="${CPLUS_INCLUDE_PATH:+$CPLUS_INCLUDE_PATH:}$CUDA_INCLUDE_PATH"
# echo "CPATH is set to: $CPATH"
# echo "CPLUS_INCLUDE_PATH is set to: $CPLUS_INCLUDE_PATH"

# NOTE(review): project name says Math40k but OMINI_CONFIG points at
# commonsense.yaml — confirm which dataset this run is actually for.
export WANDB_PROJECT="Llama2_7B_FT_Math40k_2"

# Pin BLAS/OpenMP/NumExpr thread pools to 1 to avoid CPU oversubscription
# alongside the data-loader / accelerate workers.
export OMP_NUM_THREADS=1
export MKL_NUM_THREADS=1
export OPENBLAS_NUM_THREADS=1
export NUMEXPR_NUM_THREADS=1

date +"%F %T"

# Full set of adapter variants; overwritten by TEXT=("loco") below, kept here
# as documentation of the available choices.
TEXT=("oft" "boft" "loco" "hra")
# TEXT=("oft" "boft" "loco")
# --run_text "$text"
# --dynamo_backend no
export ACCELERATE_DYNAMO_BACKEND="no"

# Reference single-run invocation (kept for manual debugging):
# accelerate launch --dynamo_backend no --main_process_port 41353 -m src.testLlama \
#   --config_path "$OMINI_CONFIG" --trainer_args.output_dir "./expsXX/" --trainer_args.learning_rate=2e-2 \
#   --run_text "loco" --trainer_args.per_device_train_batch_size 16 \
#   --rotation_adapter_config.num_rotations 1 --rotation_adapter_config.r 16 \
#   --trainer_args.gradient_accumulation_steps 1 \
#   --trainer_args.num_train_epochs 3.0 --data.dataset_split train \
#   --trainer_args.max_steps=61 \
#   --trainer_args.eval_strategy '"no"' \
#   --trainer_args.load_best_model_at_end False \
#   --trainer_args.save_strategy '"no"' \
#   --trainer_args.logging_step 1000 \
#   --trainer_args.report_to none

date +"%F %T"

# Only the "loco" variant is run in this invocation.
TEXT=("loco")
for text in "${TEXT[@]}"; do
  # NOTE(review): '--trainer_args.logging_step' — HF TrainingArguments spells
  # this 'logging_steps'; confirm the config parser accepts the singular form.
  accelerate launch --dynamo_backend no --main_process_port 41353 -m src.testLlama \
    --config_path "$OMINI_CONFIG" --trainer_args.output_dir "./expsXX/" --trainer_args.learning_rate=2e-2 \
    --run_text "$text" --trainer_args.per_device_train_batch_size 1 \
    --rotation_adapter_config.num_rotations 4 --rotation_adapter_config.r 8 \
    --trainer_args.gradient_accumulation_steps 1 \
    --trainer_args.num_train_epochs 3.0 --data.dataset_split train \
    --trainer_args.max_steps=61 \
    --trainer_args.eval_strategy '"no"' \
    --trainer_args.load_best_model_at_end False \
    --trainer_args.save_strategy '"no"' \
    --trainer_args.logging_step 1000 \
    --trainer_args.report_to none
done

date +"%F %T"