davidrd123 committed
Commit 327d66e · verified · 1 Parent(s): cdb06a5

Upload 3 files

step6500/adapter_config.json ADDED
@@ -0,0 +1,41 @@
+ {
+ "alpha_pattern": {},
+ "auto_mapping": null,
+ "base_model_name_or_path": null,
+ "bias": "none",
+ "corda_config": null,
+ "eva_config": null,
+ "exclude_modules": null,
+ "fan_in_fan_out": false,
+ "inference_mode": false,
+ "init_lora_weights": true,
+ "layer_replication": null,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 64,
+ "lora_bias": false,
+ "lora_dropout": 0.0,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "qalora_group_size": 16,
+ "r": 64,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "k",
+ "v",
+ "o",
+ "q",
+ "ffn.0",
+ "ffn.2"
+ ],
+ "target_parameters": null,
+ "task_type": null,
+ "trainable_token_indices": null,
+ "use_dora": false,
+ "use_qalora": false,
+ "use_rslora": false
+ }
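
The JSON above is a standard PEFT adapter_config.json: rank 64 with lora_alpha 64 (so the LoRA scale alpha/r is 1.0), no dropout, and the q/k/v/o attention projections plus the two feed-forward linears (ffn.0, ffn.2) as target modules. As a minimal sketch, assuming the peft package is installed, the same configuration can be rebuilt programmatically, or loaded straight from the step6500 folder with PeftConfig.from_pretrained:

```python
# Illustrative only: mirror the adapter_config.json above as a peft.LoraConfig.
from peft import LoraConfig

lora_config = LoraConfig(
    r=64,                 # LoRA rank ("r" in the JSON)
    lora_alpha=64,        # effective scale = lora_alpha / r = 1.0
    lora_dropout=0.0,
    bias="none",
    init_lora_weights=True,
    use_rslora=False,
    use_dora=False,
    target_modules=["q", "k", "v", "o", "ffn.0", "ffn.2"],
)
print(lora_config)

# Equivalent: read the committed config directly from the checkpoint folder.
# from peft import PeftConfig
# cfg = PeftConfig.from_pretrained("step6500")
```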
step6500/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7f1cc25d427eeb67cf104e5addc9d9a4c52bbbc4ed88ace5e028b37797e7ab97
+ size 613516752
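
adapter_model.safetensors is stored through Git LFS, so the commit records only the pointer file (spec version, sha256 oid, and byte size). A minimal sketch, using only the standard library and an assumed local path, to check that a downloaded copy matches the pointer:

```python
# Illustrative only: verify a downloaded file against the LFS pointer above.
import hashlib
import os

path = "step6500/adapter_model.safetensors"   # assumed local path
expected_oid = "7f1cc25d427eeb67cf104e5addc9d9a4c52bbbc4ed88ace5e028b37797e7ab97"
expected_size = 613516752

sha = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        sha.update(chunk)

assert os.path.getsize(path) == expected_size, "size mismatch"
assert sha.hexdigest() == expected_oid, "sha256 mismatch"
print("file matches the LFS pointer")
```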
step6500/wan_high.toml ADDED
@@ -0,0 +1,116 @@
+ # Output path for training runs. Each training run makes a new directory in here.
+ output_dir = '/root/diffusion-pipe/output/graffito_v1_high_noise'
+
+ # Dataset config file.
+ dataset = '/root/diffusion-pipe/my_configs/dataset_graffito.toml'
+ # You can have separate eval datasets. Give them a name for Tensorboard metrics.
+ # eval_datasets = [
+ # {name = 'something', config = 'path/to/eval_dataset.toml'},
+ # ]
+
+ # training settings
+
+ # I usually set this to a really high value because I don't know how long I want to train.
+ epochs = 1000
+ # Batch size of a single forward/backward pass for one GPU.
+ micro_batch_size_per_gpu = 2
+ image_micro_batch_size_per_gpu = 4
+ # Pipeline parallelism degree. A single instance of the model is divided across this many GPUs.
+ pipeline_stages = 1
+ # Number of micro-batches sent through the pipeline for each training step.
+ # If pipeline_stages > 1, a higher GAS means better GPU utilization due to smaller pipeline bubbles (where GPUs aren't overlapping computation).
+ gradient_accumulation_steps = 1
+ # Grad norm clipping.
+ gradient_clipping = 1.0
+ # Learning rate warmup.
+ warmup_steps = 100
+
+
+
+ # eval settings
+
+ eval_every_n_epochs = 1
+ eval_before_first_step = true
+ # Might want to set these lower for eval so that fewer images get dropped (eval dataset size is usually much smaller than training set).
+ # Each size bucket of images/videos is rounded down to the nearest multiple of the global batch size, so higher global batch size means
+ # more dropped images. Usually doesn't matter for training but the eval set is much smaller so it can matter.
+ eval_micro_batch_size_per_gpu = 1
+ eval_gradient_accumulation_steps = 1
+
+ # misc settings
+
+ # Probably want to set this a bit higher if you have a smaller dataset so you don't end up with a million saved models.
+ save_every_n_epochs = 100
+ save_every_n_steps = 250
+ # Can checkpoint the training state every n epochs or minutes. Set only one of these. You can resume from checkpoints using the --resume_from_checkpoint flag.
+ #checkpoint_every_n_epochs = 1
+ checkpoint_every_n_minutes = 30
+ # Always set to true unless you have a huge amount of VRAM.
+ activation_checkpointing = 'unsloth'
+ # Controls how Deepspeed decides how to divide layers across GPUs. Probably don't change this.
+ partition_method = 'parameters'
+ # dtype for saving the LoRA or model, if different from training dtype
+ save_dtype = 'bfloat16'
+ # Batch size for caching latents and text embeddings. Increasing can lead to higher GPU utilization during caching phase but uses more memory.
+ caching_batch_size = 16
+
+ # Number of parallel processes to use in map() calls when caching the dataset. Defaults to min(8, num_cpu_cores) if unset.
+ # map_num_proc = 4
+
+ # How often deepspeed logs to console.
+ steps_per_print = 1
+ # How to extract video clips for training from a single input video file.
+ # The video file is first assigned to one of the configured frame buckets, but then we must extract one or more clips of exactly the right
+ # number of frames for that bucket.
+ # single_beginning: one clip starting at the beginning of the video
+ # single_middle: one clip from the middle of the video (cutting off the start and end equally)
+ # multiple_overlapping: extract the minimum number of clips to cover the full range of the video. They might overlap some.
+ # default is single_middle
+ video_clip_mode = 'single_middle'
+
+ # blocks_to_swap = 10
+
+
+ [model]
+ type = 'wan'
+ # Can load Hunyuan Video entirely from the ckpt path set up for the official inference scripts.
+ #ckpt_path = '/home/anon/HunyuanVideo/ckpts'
+ ckpt_path = '/root/diffusion-pipe/imagegen_models/wan/Wan2.2-I2V-A14B'
+ transformer_path = '/root/diffusion-pipe/imagegen_models/wan/Wan2.2-I2V-A14B/high_noise_model'
+ # Or you can load it by pointing to all the ComfyUI files.
+ # transformer_path = '/notebooks/diffusion-pipe/imagegen_models/hunyuan_video_720_cfgdistill_fp8_e4m3fn.safetensors'
+ # vae_path = '/notebooks/diffusion-pipe/imagegen_models/hunyuan_video_vae_bf16.safetensors'
+ # llm_path = '/notebooks/diffusion-pipe/imagegen_models/llava-llama-3-8b-text-encoder-tokenizer'
+ # clip_path = '/notebooks/diffusion-pipe/imagegen_models/clip-vit-large-patch14'
+ # Base dtype used for all models.
+ dtype = 'bfloat16'
+
+ transformer_dtype = 'bfloat16'
+ min_t = 0.875
+ max_t = 1.0
+ # How to sample timesteps to train on. Can be logit_normal or uniform.
+ timestep_sample_method = 'logit_normal'
+
+ [adapter]
+ type = 'lora'
+ rank = 64
+ # Dtype for the LoRA weights you are training.
+ dtype = 'bfloat16'
+ # You can initialize the lora weights from a previously trained lora.
+ #init_from_existing = '/data/diffusion_pipe_training_runs/something/epoch50'
+
+ [optimizer]
+ # AdamW from the optimi library is a good default since it automatically uses Kahan summation when training bfloat16 weights.
+ # Look at train.py for other options. You could also easily edit the file and add your own.
+ type = 'adamw_optimi'
+ lr = 2e-5
+ # # type = 'adamw8bitkahan'
+ # lr = 4e-5
+ # betas = [0.9, 0.99]
+ # weight_decay = 0.01
+ # eps = 1e-8
+
+ # type = 'automagic'
+ # lr = 1e-6 # Starting learning rate
+ # weight_decay = 0.001 # Weight decay
+ # lr_bump = 2e-6 # Amount to bump LR when adjusting
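
wan_high.toml is the diffusion-pipe config for this high-noise run: the Wan2.2-I2V-A14B high_noise_model trained with a rank-64 LoRA in bfloat16, logit-normal timestep sampling restricted to t in [0.875, 1.0], and adamw_optimi at lr = 2e-5. A run is normally started by pointing the DeepSpeed launcher at this file (something like deepspeed --num_gpus=1 train.py --deepspeed --config path/to/wan_high.toml; see the diffusion-pipe README for the exact invocation). Below is a small pre-flight sketch, assuming Python 3.11+ (tomllib) and an illustrative local config path, that parses the file and checks the paths it references:

```python
# Illustrative only: parse wan_high.toml and confirm its referenced paths exist.
import tomllib
from pathlib import Path

with open("step6500/wan_high.toml", "rb") as f:   # assumed local path
    cfg = tomllib.load(f)

checks = {
    "dataset": cfg["dataset"],
    "model.ckpt_path": cfg["model"]["ckpt_path"],
    "model.transformer_path": cfg["model"]["transformer_path"],
}
for name, p in checks.items():
    print(f"{name:25s} {'ok' if Path(p).exists() else 'MISSING'}  {p}")

print("output_dir:", cfg["output_dir"])
print("adapter rank:", cfg["adapter"]["rank"])
print("timestep range:", cfg["model"]["min_t"], "to", cfg["model"]["max_t"])
```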