From 3949137513f8024331a0a803d8808daa28a048be Mon Sep 17 00:00:00 2001
From: Stephen Murray
Date: Fri, 29 Dec 2023 23:29:30 +0000
Subject: [PATCH] feat: add config for mistral/open-instruct

---
 finetunes/mistral-open-instruct/config.yaml | 78 +++++++++++++++++++++
 1 file changed, 78 insertions(+)
 create mode 100644 finetunes/mistral-open-instruct/config.yaml

diff --git a/finetunes/mistral-open-instruct/config.yaml b/finetunes/mistral-open-instruct/config.yaml
new file mode 100644
index 0000000..3163648
--- /dev/null
+++ b/finetunes/mistral-open-instruct/config.yaml
@@ -0,0 +1,78 @@
+base_model: mistralai/Mistral-7B-v0.1
+model_type: MistralForCausalLM
+tokenizer_type: LlamaTokenizer
+is_mistral_derived_model: true
+
+load_in_8bit: false
+load_in_4bit: true
+strict: false
+
+datasets:
+  - path: VMware/open-instruct
+    type: gpteacher
+dataset_prepared_path: last_run_prepared
+val_set_size: 0.05
+output_dir: ./qlora-out
+
+adapter: qlora
+lora_model_dir:
+
+sequence_len: 8192
+sample_packing: true
+pad_to_sequence_len: true
+
+lora_r: 32
+lora_alpha: 16
+lora_dropout: 0.05
+lora_target_linear: true
+lora_fan_in_fan_out:
+lora_target_modules:
+  - gate_proj
+  - down_proj
+  - up_proj
+  - q_proj
+  - v_proj
+  - k_proj
+  - o_proj
+
+wandb_project: qlora-testing
+wandb_entity:
+wandb_watch:
+wandb_run_id:
+wandb_log_model:
+
+gradient_accumulation_steps: 4
+micro_batch_size: 2
+num_epochs: 1
+optimizer: adamw_bnb_8bit
+lr_scheduler: cosine
+learning_rate: 0.0002
+
+train_on_inputs: false
+group_by_length: false
+bf16: true
+fp16: false
+tf32: false
+
+gradient_checkpointing: true
+early_stopping_patience:
+resume_from_checkpoint:
+local_rank:
+logging_steps: 1
+xformers_attention:
+flash_attention: true
+
+warmup_steps: 10
+eval_steps: 0.05
+eval_table_size:
+eval_table_max_new_tokens: 128
+save_steps:
+debug:
+deepspeed:
+weight_decay: 0.0
+fsdp:
+fsdp_config:
+special_tokens:
+  bos_token: "<s>"
+  eos_token: "</s>"
+  unk_token: "<unk>"
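
The field names above match the axolotl trainer's config schema; if that is the intended consumer, training is typically launched with "accelerate launch -m axolotl.cli.train finetunes/mistral-open-instruct/config.yaml". As a quick sanity check on the batch settings, here is a minimal sketch (not part of the patch; assumes PyYAML is installed and a single-GPU run, where "num_gpus" is a hypothetical stand-in for the actual world size) that loads the file and computes the effective global batch size:

    import yaml  # PyYAML

    # Load the config added by this patch.
    with open("finetunes/mistral-open-instruct/config.yaml") as f:
        cfg = yaml.safe_load(f)

    num_gpus = 1  # assumption: single-GPU QLoRA run
    effective_batch = (
        cfg["micro_batch_size"] * cfg["gradient_accumulation_steps"] * num_gpus
    )
    print(f"effective batch size: {effective_batch}")  # 2 * 4 * 1 = 8

With micro_batch_size 2 and gradient_accumulation_steps 4, each optimizer step sees 8 packed sequences per GPU, which keeps memory low enough for 4-bit QLoRA at sequence_len 8192.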