#!/bin/bash

# MODEL_NAME="Qwen/Qwen2-VL-7B-Instruct"
# MODEL_NAME="Qwen/Qwen2-VL-2B-Instruct"
# MODEL_NAME="Qwen/Qwen2.5-VL-3B-Instruct"
# MODEL_NAME="Qwen/Qwen2.5-VL-7B-Instruct"
MODEL_NAME="/DATA/disk1/cdp/hf_models/Qwen3-VL-8B-Instruct"

export PYTHONPATH=src:$PYTHONPATH

GLOBAL_BATCH_SIZE=128
BATCH_PER_DEVICE=4
NUM_DEVICES=8
GRAD_ACCUM_STEPS=$((GLOBAL_BATCH_SIZE / (BATCH_PER_DEVICE * NUM_DEVICES)))

# If you want to tune the `embed_token` with LoRA, you need to tune `lm_head` together.
# If you want to set the min and max pixels for Qwen3-VL, set them as (N * 32 * 32).

deepspeed src/train/train_sft.py \
    --use_liger_kernel False \
    --lora_enable True \
    --use_dora False \
    --lora_namespan_exclude "['lm_head', 'embed_tokens']" \
    --lora_rank 32 \
    --lora_alpha 64 \
    --lora_dropout 0.05 \
    --num_lora_modules -1 \
    --deepspeed scripts/zero3.json \
    --model_id $MODEL_NAME \
    --data_path /DATA/disk1/cdp/hxa/dataset/dataset_merged_en_formatted.json \
    --image_folder /DATA/disk1/cdp/hxa/dataset \
    --remove_unused_columns False \
    --freeze_vision_tower False \
    --freeze_llm True \
    --freeze_merger False \
    --bf16 True \
    --fp16 False \
    --disable_flash_attn2 False \
    --output_dir output/testing_lora_multi_qa_8b \
    --num_train_epochs 8 \
    --per_device_train_batch_size $BATCH_PER_DEVICE \
    --gradient_accumulation_steps $GRAD_ACCUM_STEPS \
    --image_min_pixels $((256 * 28 * 28)) \
    --image_max_pixels $((1280 * 28 * 28)) \
    --learning_rate 1e-4 \
    --merger_lr 1e-5 \
    --vision_lr 2e-6 \
    --weight_decay 0.1 \
    --warmup_ratio 0.03 \
    --lr_scheduler_type "cosine" \
    --logging_steps 1 \
    --tf32 True \
    --gradient_checkpointing True \
    --report_to tensorboard \
    --lazy_preprocess True \
    --save_strategy "steps" \
    --save_steps 100 \
    --save_total_limit 50 \
    --dataloader_num_workers 4
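
# Sanity check on the effective global batch size:
#   BATCH_PER_DEVICE * NUM_DEVICES * GRAD_ACCUM_STEPS = 4 * 8 * 4 = 128 = GLOBAL_BATCH_SIZE
#
# Note: the --image_min_pixels / --image_max_pixels values above use the 28 * 28
# token granularity of Qwen2/2.5-VL. If Qwen3-VL expects multiples of (32 * 32),
# as the comment near the top suggests, a hedged alternative would be:
#   --image_min_pixels $((256 * 32 * 32))
#   --image_max_pixels $((1280 * 32 * 32))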