base_model: openchat/openchat_3.5
model_type: MistralForCausalLM
tokenizer_type: LlamaTokenizer
is_mistral_derived_model: true
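
# Quantization disabled: load the base weights in full precision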
load_in_8bit: false
load_in_4bit: false
strict: false
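
# Instruction-tuning data in alpaca format; 5% of samples held out for validation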
datasets:
  - path: FrederikH/LinearAlgebra-Python
    type: alpaca
dataset_prepared_path:
val_set_size: 0.05
output_dir: ./out
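
# Pack multiple samples into each 8192-token sequence to reduce padding waste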
sequence_len: 8192
sample_packing: true
pad_to_sequence_len: true
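
# Weights & Biases logging (fields left blank, so W&B is effectively unconfigured)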
wandb_project:
wandb_entity:
wandb_watch:
wandb_run_id:
wandb_log_model:
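
# Effective batch size per device: micro_batch_size 4 x gradient_accumulation_steps 8 = 32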
gradient_accumulation_steps: 8
micro_batch_size: 4
num_epochs: 4
optimizer: adamw_bnb_8bit
lr_scheduler: cosine
learning_rate: 0.000005
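
# Mask prompt tokens from the loss (train on completions only); bf16 precision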
train_on_inputs: false
group_by_length: false
bf16: true
fp16: false
tf32: false
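
# Gradient checkpointing trades compute for activation memory; log metrics every step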
gradient_checkpointing: true
early_stopping_patience:
resume_from_checkpoint:
local_rank:
logging_steps: 1
xformers_attention:
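
# Short warmup before the cosine schedule; cap eval-table generations at 128 new tokens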
warmup_steps: 10
eval_table_size:
eval_table_max_new_tokens: 128
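
# No DeepSpeed/FSDP (single-node defaults); standard Llama-style special tokens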
debug:
deepspeed:
fsdp:
fsdp_config:
special_tokens:
  bos_token: "<s>"
  eos_token: "</s>"
  unk_token: "<unk>"
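
# Run the Dharma-1 mini benchmark suite during evaluation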
bench_dataset: pharaouk/dharma-1/dharma_1_mini.json
do_bench_eval: true
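
# Push every saved checkpoint to the Hugging Face Hub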
push_to_hub: true
hub_model_id: FrederikH/linear-algebra
hub_strategy: "every_save"
hub_token:
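
# FlashAttention kernels for long-context efficiency; AdamW weight decay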
flash_attention: true
weight_decay: 0.01
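
# Evaluate and checkpoint on every training step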
eval_steps: 1
save_steps: 1
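
# Launch command (an assumption: the standard Axolotl invocation, not stated in the original):
#   accelerate launch -m axolotl.cli.train config.yml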