---
# Training configuration: fine-tune meta-llama/Llama-2-7b-hf with a rotation
# adapter on the MetaMathQA-40K dataset, logging to Weights & Biases.

model:
  # Alternatives previously tried (kept for reference):
  #   facebook/opt-125m, openai-community/gpt2, EleutherAI/pythia-160m,
  #   Qwen/Qwen2.5-0.5B
  model_name: meta-llama/Llama-2-7b-hf
  # adapter_path: "./run_all/exnr15/ft2"
  # adapter_path: './exp_init/run_ex01/ft2'
  data_collator_mode: 'dynamic'
  rotation_adapter_config:
    r: 4
    num_rotations: 4
    # Wider target set used in other runs. NOTE(review): the original list
    # contained "v_proj" twice — possibly "k_proj" was intended; confirm
    # before re-enabling.
    # target_modules: ["q_proj", "v_proj", "o_proj", "gate_proj", "up_proj", "down_proj"]
    target_modules: ["q_proj", "v_proj"]

data:
  dataset_name: 'math'
  # presumably the eval/train split fraction — TODO confirm against the loader
  split_ratio: 0.025
  # path: "./data/gsm8k_test.jsonl"
  path: ./data/MetaMathQA-40K/MetaMathQA-40K.json
  # path: ./data/MetaMathQA/MetaMathQA-395K.json
  dataset_split: train
  # dataset_field: [question, answer]
  dataset_field: [query, response]

trainer_args:
  # Written as 2.0e-4 (not 2e-4): YAML 1.1 loaders such as PyYAML only
  # resolve scientific notation as a float when a decimal point is present;
  # bare "2e-4" would load as a string. Same numeric value (0.0002).
  learning_rate: 2.0e-4
  # eval_strategy: steps
  per_device_train_batch_size: 32
  per_device_eval_batch_size: 64
  # accumulate_grad_batches: 1
  # save_steps: 1000
  gradient_checkpointing: false  # turn off for faster training (trades memory for speed)
  output_dir: "./run_exps"
  # save_path: "runs"
  report_to: wandb
  logging_steps: 25
  # eval_steps: 100
  # dataloader_num_workers: 4
  num_train_epochs: 2.0
  # max_steps: -1

# device: 'cuda'