# OpenThinker3-1.5B-RLVE / configs.yaml
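# SFT training configuration (LLaMA-Factory-style keys): full fine-tuning of
# Qwen/Qwen2.5-1.5B-Instruct on the ShareGPT-formatted
# mlfoundations-dev/openthoughts3 dataset under DeepSpeed ZeRO-3, with the
# result pushed to mlfoundations-dev/openthoughts3_full_qwen25_1b.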
assistant_tag: gpt
bf16: 'True'
content_tag: value
cutoff_len: '16384'
dataloader_num_workers: '4'
dataloader_persistent_workers: 'True'
dataloader_pin_memory: 'True'
dataset: mlfoundations-dev/openthoughts3
dataset_dir: ONLINE
ddp_timeout: '180000000'
deepspeed: /opt/ml/code/zero3.json
do_train: 'True'
enable_liger_kernel: 'True'
finetuning_type: full
formatting: sharegpt
global_batch_size: '256'
gradient_accumulation_steps: '1'
hub_model_id: mlfoundations-dev/openthoughts3_full_qwen25_1b
learning_rate: '0.00016'
logging_steps: '1'
lr_scheduler_type: cosine
messages: conversations
model_name_or_path: Qwen/Qwen2.5-1.5B-Instruct
num_train_epochs: '7.0'
output_dir: /opt/ml/model
overwrite_cache: 'True'
per_device_train_batch_size: '4'
plot_loss: 'True'
preprocessing_num_workers: '16'
push_to_db: 'True'
push_to_hub: 'True'
report_to: wandb
role_tag: from
run_name: openthoughts3_full_qwen25_1b
save_strategy: epoch
stage: sft
template: qwen25
user_tag: human
warmup_ratio: '0.1'
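
Note that the quoted values above ('True', '16384', '0.00016', ...) are stored as YAML strings, so whatever consumes this file has to coerce them into booleans and numbers before use. Below is a minimal loading sketch, assuming PyYAML is installed and the file is saved as configs.yaml; the coerce helper is illustrative, not part of the actual training code, which handles this through its own argument parser.

    # Minimal sketch: load the config and coerce string-typed values
    # into native bool/int/float, mirroring what an argument-parsing
    # layer would do before handing them to a trainer.
    import yaml

    def coerce(value):
        """Best-effort conversion of string-typed YAML values."""
        if not isinstance(value, str):
            return value
        if value in ("True", "False"):
            return value == "True"
        try:
            return int(value)
        except ValueError:
            pass
        try:
            return float(value)
        except ValueError:
            return value

    with open("configs.yaml") as f:
        raw = yaml.safe_load(f)

    config = {key: coerce(val) for key, val in raw.items()}

    # Sanity check on the batch settings: if global_batch_size equals
    # per_device_train_batch_size * gradient_accumulation_steps * world_size,
    # then 256 = 4 * 1 * world_size would imply 64 data-parallel ranks
    # (an inference from the numbers, not stated in the config itself).
    print(config["global_batch_size"],
          config["per_device_train_batch_size"],
          config["gradient_accumulation_steps"])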