Luka512 committed
Commit de37aeb · verified · 1 Parent(s): 07345e2

Upload folder using huggingface_hub

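The commit message above is the default one produced by the huggingface_hub upload API. A minimal sketch of the kind of call that generates such a commit; the local folder path and repo_id below are placeholders, not taken from this repository:

```python
# Hedged sketch: push a local folder as one commit via huggingface_hub.
# folder_path and repo_id are hypothetical examples.
from huggingface_hub import HfApi

api = HfApi()
api.upload_folder(
    folder_path="./CosyVoice2-0.5B",      # hypothetical local folder
    repo_id="Luka512/CosyVoice2-0.5B",    # hypothetical repo id
    repo_type="model",
    commit_message="Upload folder using huggingface_hub",
)
```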
CosyVoice-BlankEN/config.json ADDED
@@ -0,0 +1,27 @@
+ {
+   "architectures": [
+     "Qwen2ForCausalLM"
+   ],
+   "attention_dropout": 0.0,
+   "bos_token_id": 151643,
+   "eos_token_id": 151645,
+   "hidden_act": "silu",
+   "hidden_size": 896,
+   "initializer_range": 0.02,
+   "intermediate_size": 4864,
+   "max_position_embeddings": 32768,
+   "max_window_layers": 24,
+   "model_type": "qwen2",
+   "num_attention_heads": 14,
+   "num_hidden_layers": 24,
+   "num_key_value_heads": 2,
+   "rms_norm_eps": 1e-06,
+   "rope_theta": 1000000.0,
+   "sliding_window": 32768,
+   "tie_word_embeddings": true,
+   "torch_dtype": "bfloat16",
+   "transformers_version": "4.40.1",
+   "use_cache": true,
+   "use_sliding_window": false,
+   "vocab_size": 151936
+ }
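This is a standard transformers Qwen2 text-model config (24 layers, hidden size 896, GQA with 2 KV heads, 151,936-token vocabulary): the backbone that cosyvoice2.yaml below points at via qwen_pretrain_path. A minimal sketch of inspecting it with transformers, assuming the CosyVoice-BlankEN folder has been downloaded locally:

```python
# Hedged sketch: read the config above with transformers.
# The local path is an assumption; point it at your downloaded folder.
from transformers import AutoConfig

cfg = AutoConfig.from_pretrained("CosyVoice-BlankEN")
print(cfg.model_type, cfg.num_hidden_layers, cfg.hidden_size, cfg.num_key_value_heads)
# qwen2 24 896 2
```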
CosyVoice-BlankEN/generation_config.json ADDED
@@ -0,0 +1,14 @@
+ {
+   "bos_token_id": 151643,
+   "pad_token_id": 151643,
+   "do_sample": true,
+   "eos_token_id": [
+     151645,
+     151643
+   ],
+   "repetition_penalty": 1.1,
+   "temperature": 0.7,
+   "top_p": 0.8,
+   "top_k": 20,
+   "transformers_version": "4.37.0"
+ }
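These are the default text-generation settings for the backbone (sampling with temperature 0.7, nucleus/top-k filtering, and a mild repetition penalty). A minimal sketch of loading them, assuming the same local folder as above:

```python
# Hedged sketch: load the generation defaults above; the path is an assumption.
from transformers import GenerationConfig

gen = GenerationConfig.from_pretrained("CosyVoice-BlankEN")
print(gen.do_sample, gen.temperature, gen.top_p, gen.top_k, gen.repetition_penalty)
# True 0.7 0.8 20 1.1
```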
CosyVoice-BlankEN/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
CosyVoice-BlankEN/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:130282af0dfa9fe5840737cc49a0d339d06075f83c5a315c3372c9a0740d0b96
+ size 988097824
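The three lines above are a Git LFS pointer, not the weights themselves; the real ~988 MB safetensors file is fetched with `git lfs pull` or via huggingface_hub. A minimal sketch of checking a downloaded copy against the pointer's oid and size (the local path is an assumption):

```python
# Hedged sketch: verify a pulled LFS object against the pointer above.
import hashlib
from pathlib import Path

path = Path("CosyVoice-BlankEN/model.safetensors")  # assumed local path
expected_oid = "130282af0dfa9fe5840737cc49a0d339d06075f83c5a315c3372c9a0740d0b96"
expected_size = 988097824

sha = hashlib.sha256()
with path.open("rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        sha.update(chunk)

assert path.stat().st_size == expected_size
assert sha.hexdigest() == expected_oid
```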
CosyVoice-BlankEN/tokenizer_config.json ADDED
@@ -0,0 +1,40 @@
+ {
+   "add_prefix_space": false,
+   "added_tokens_decoder": {
+     "151643": {
+       "content": "<|endoftext|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151644": {
+       "content": "<|im_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151645": {
+       "content": "<|im_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "additional_special_tokens": ["<|im_start|>", "<|im_end|>"],
+   "bos_token": null,
+   "chat_template": "{% for message in messages %}{% if loop.first and messages[0]['role'] != 'system' %}{{ '<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n' }}{% endif %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}",
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "<|im_end|>",
+   "errors": "replace",
+   "model_max_length": 32768,
+   "pad_token": "<|endoftext|>",
+   "split_special_tokens": false,
+   "tokenizer_class": "Qwen2Tokenizer",
+   "unk_token": null
+ }
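This is the stock Qwen2 BPE tokenizer, with ChatML-style <|im_start|>/<|im_end|> special tokens and the matching chat_template. A minimal sketch of rendering a prompt with it (local path assumed as before):

```python
# Hedged sketch: apply the ChatML chat_template defined above; path is assumed.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("CosyVoice-BlankEN")
prompt = tok.apply_chat_template(
    [{"role": "user", "content": "Hello"}],
    tokenize=False,
    add_generation_prompt=True,
)
print(prompt)
# <|im_start|>system\nYou are a helpful assistant.<|im_end|>\n<|im_start|>user\nHello<|im_end|>\n<|im_start|>assistant\n
```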
CosyVoice-BlankEN/vocab.json ADDED
The diff for this file is too large to render. See raw diff
 
cosyvoice2.yaml ADDED
@@ -0,0 +1,234 @@
+ # set random seed, so that you may reproduce your result.
+ __set_seed1: !apply:random.seed [1986]
+ __set_seed2: !apply:numpy.random.seed [1986]
+ __set_seed3: !apply:torch.manual_seed [1986]
+ __set_seed4: !apply:torch.cuda.manual_seed_all [1986]
+
+ # fixed params
+ sample_rate: 24000
+ llm_input_size: 896
+ llm_output_size: 896
+ spk_embed_dim: 192
+ qwen_pretrain_path: ''
+ token_frame_rate: 25
+ token_mel_ratio: 2
+
+ # stream related params
+ chunk_size: 25 # streaming inference chunk size, in tokens
+ num_decoding_left_chunks: -1 # streaming inference flow decoder left chunk size, <0 means use all left chunks
+
+ # model params
+ # for every class/function defined in this repo, we use !<name> or !<new> for initialization, so that users can find all corresponding classes/functions from this single yaml.
+ # for system/third-party classes/functions, this is not required.
+ llm: !new:cosyvoice.llm.llm.Qwen2LM
+     llm_input_size: !ref <llm_input_size>
+     llm_output_size: !ref <llm_output_size>
+     speech_token_size: 6561
+     length_normalized_loss: True
+     lsm_weight: 0
+     mix_ratio: [5, 15]
+     llm: !new:cosyvoice.llm.llm.Qwen2Encoder
+         pretrain_path: !ref <qwen_pretrain_path>
+     sampling: !name:cosyvoice.utils.common.ras_sampling
+         top_p: 0.8
+         top_k: 25
+         win_size: 10
+         tau_r: 0.1
+
+ flow: !new:cosyvoice.flow.flow.CausalMaskedDiffWithXvec
+     input_size: 512
+     output_size: 80
+     spk_embed_dim: !ref <spk_embed_dim>
+     output_type: 'mel'
+     vocab_size: 6561
+     input_frame_rate: !ref <token_frame_rate>
+     only_mask_loss: True
+     token_mel_ratio: !ref <token_mel_ratio>
+     pre_lookahead_len: 3
+     encoder: !new:cosyvoice.transformer.upsample_encoder.UpsampleConformerEncoder
+         output_size: 512
+         attention_heads: 8
+         linear_units: 2048
+         num_blocks: 6
+         dropout_rate: 0.1
+         positional_dropout_rate: 0.1
+         attention_dropout_rate: 0.1
+         normalize_before: True
+         input_layer: 'linear'
+         pos_enc_layer_type: 'rel_pos_espnet'
+         selfattention_layer_type: 'rel_selfattn'
+         input_size: 512
+         use_cnn_module: False
+         macaron_style: False
+         static_chunk_size: !ref <chunk_size>
+     decoder: !new:cosyvoice.flow.flow_matching.CausalConditionalCFM
+         in_channels: 240
+         n_spks: 1
+         spk_emb_dim: 80
+         cfm_params: !new:omegaconf.DictConfig
+             content:
+                 sigma_min: 1e-06
+                 solver: 'euler'
+                 t_scheduler: 'cosine'
+                 training_cfg_rate: 0.2
+                 inference_cfg_rate: 0.7
+                 reg_loss_type: 'l1'
+         estimator: !new:cosyvoice.flow.decoder.CausalConditionalDecoder
+             in_channels: 320
+             out_channels: 80
+             channels: [256]
+             dropout: 0.0
+             attention_head_dim: 64
+             n_blocks: 4
+             num_mid_blocks: 12
+             num_heads: 8
+             act_fn: 'gelu'
+             static_chunk_size: !ref <chunk_size> * <token_mel_ratio>
+             num_decoding_left_chunks: !ref <num_decoding_left_chunks>
+
+ hift: !new:cosyvoice.hifigan.generator.HiFTGenerator
+     in_channels: 80
+     base_channels: 512
+     nb_harmonics: 8
+     sampling_rate: !ref <sample_rate>
+     nsf_alpha: 0.1
+     nsf_sigma: 0.003
+     nsf_voiced_threshold: 10
+     upsample_rates: [8, 5, 3]
+     upsample_kernel_sizes: [16, 11, 7]
+     istft_params:
+         n_fft: 16
+         hop_len: 4
+     resblock_kernel_sizes: [3, 7, 11]
+     resblock_dilation_sizes: [[1, 3, 5], [1, 3, 5], [1, 3, 5]]
+     source_resblock_kernel_sizes: [7, 7, 11]
+     source_resblock_dilation_sizes: [[1, 3, 5], [1, 3, 5], [1, 3, 5]]
+     lrelu_slope: 0.1
+     audio_limit: 0.99
+     f0_predictor: !new:cosyvoice.hifigan.f0_predictor.ConvRNNF0Predictor
+         num_class: 1
+         in_channels: 80
+         cond_channels: 512
+
+ # gan related module
+ mel_spec_transform1: !name:matcha.utils.audio.mel_spectrogram
+     n_fft: 1920
+     num_mels: 80
+     sampling_rate: !ref <sample_rate>
+     hop_size: 480
+     win_size: 1920
+     fmin: 0
+     fmax: null
+     center: False
+ hifigan: !new:cosyvoice.hifigan.hifigan.HiFiGan
+     generator: !ref <hift>
+     discriminator: !new:cosyvoice.hifigan.discriminator.MultipleDiscriminator
+         mpd: !new:matcha.hifigan.models.MultiPeriodDiscriminator
+         mrd: !new:cosyvoice.hifigan.discriminator.MultiResSpecDiscriminator
+     mel_spec_transform: [
+         !ref <mel_spec_transform1>
+     ]
+
+ # processor functions
+ parquet_opener: !name:cosyvoice.dataset.processor.parquet_opener
+ get_tokenizer: !name:cosyvoice.tokenizer.tokenizer.get_qwen_tokenizer
+     token_path: !ref <qwen_pretrain_path>
+     skip_special_tokens: True
+ allowed_special: 'all'
+ tokenize: !name:cosyvoice.dataset.processor.tokenize
+     get_tokenizer: !ref <get_tokenizer>
+     allowed_special: !ref <allowed_special>
+ filter: !name:cosyvoice.dataset.processor.filter
+     max_length: 40960
+     min_length: 100
+     token_max_length: 200
+     token_min_length: 1
+ resample: !name:cosyvoice.dataset.processor.resample
+     resample_rate: !ref <sample_rate>
+ truncate: !name:cosyvoice.dataset.processor.truncate
+     truncate_length: 24480 # must be a multiple of hop_size
+ feat_extractor: !name:matcha.utils.audio.mel_spectrogram
+     n_fft: 1920
+     num_mels: 80
+     sampling_rate: !ref <sample_rate>
+     hop_size: 480
+     win_size: 1920
+     fmin: 0
+     fmax: 8000
+     center: False
+ compute_fbank: !name:cosyvoice.dataset.processor.compute_fbank
+     feat_extractor: !ref <feat_extractor>
+     token_mel_ratio: 2
+ compute_f0: !name:cosyvoice.dataset.processor.compute_f0
+     sample_rate: !ref <sample_rate>
+     hop_size: 480
+ parse_embedding: !name:cosyvoice.dataset.processor.parse_embedding
+     normalize: True
+ shuffle: !name:cosyvoice.dataset.processor.shuffle
+     shuffle_size: 1000
+ sort: !name:cosyvoice.dataset.processor.sort
+     sort_size: 500 # sort_size should be less than shuffle_size
+ batch: !name:cosyvoice.dataset.processor.batch
+     batch_type: 'dynamic'
+     max_frames_in_batch: 4000
+ padding: !name:cosyvoice.dataset.processor.padding
+     use_spk_embedding: False # change to True during sft
+
+
+ # dataset processor pipeline
+ data_pipeline: [
+     !ref <parquet_opener>,
+     !ref <tokenize>,
+     !ref <filter>,
+     !ref <resample>,
+     !ref <compute_fbank>,
+     !ref <parse_embedding>,
+     !ref <shuffle>,
+     !ref <sort>,
+     !ref <batch>,
+     !ref <padding>,
+ ]
+ data_pipeline_gan: [
+     !ref <parquet_opener>,
+     !ref <tokenize>,
+     !ref <filter>,
+     !ref <resample>,
+     !ref <truncate>,
+     !ref <compute_fbank>,
+     !ref <compute_f0>,
+     !ref <parse_embedding>,
+     !ref <shuffle>,
+     !ref <sort>,
+     !ref <batch>,
+     !ref <padding>,
+ ]
+
+ # llm flow train conf
+ train_conf:
+     optim: adam
+     optim_conf:
+         lr: 1e-5 # change to 1e-5 during sft
+     scheduler: constantlr # change to constantlr during sft
+     scheduler_conf:
+         warmup_steps: 2500
+     max_epoch: 200
+     grad_clip: 5
+     accum_grad: 2
+     log_interval: 100
+     save_per_step: 3000 # step interval for step-wise validation checkpoints; -1 means no step-wise checkpoint
+
+ # gan train conf
+ train_conf_gan:
+     optim: adam
+     optim_conf:
+         lr: 0.0002 # use small lr for gan training
+     scheduler: constantlr
+     optim_d: adam
+     optim_conf_d:
+         lr: 0.0002 # use small lr for gan training
+     scheduler_d: constantlr
+     max_epoch: 200
+     grad_clip: 5
+     accum_grad: 1 # in gan training, accum_grad must be 1
+     log_interval: 100
+     save_per_step: -1
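The !apply:/!new:/!name:/!ref tags above are hyperpyyaml syntax, so the file can be instantiated directly into live modules rather than parsed as plain YAML. A minimal sketch, assuming the CosyVoice repo and hyperpyyaml are installed and qwen_pretrain_path is overridden to point at the CosyVoice-BlankEN folder from this commit:

```python
# Hedged sketch: instantiate cosyvoice2.yaml with hyperpyyaml.
# Paths and overrides are assumptions; the cosyvoice package must be importable.
from hyperpyyaml import load_hyperpyyaml

with open("cosyvoice2.yaml") as f:
    configs = load_hyperpyyaml(f, overrides={"qwen_pretrain_path": "CosyVoice-BlankEN"})

llm, flow, hift = configs["llm"], configs["flow"], configs["hift"]
print(type(llm).__name__, type(flow).__name__, type(hift).__name__)
# Qwen2LM CausalMaskedDiffWithXvec HiFTGenerator
```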
hifigan.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:04936834f1bdcfeb0973203bb53b9f8ea652b779a06f3ff3958ee7693d7bef96
+ size 248993278
hift.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:04936834f1bdcfeb0973203bb53b9f8ea652b779a06f3ff3958ee7693d7bef96
- size 248993278
+ oid sha256:1954fcb24d868239cbcd7b24569d6d4725a41f7d214c7c8f392503ca4ae150bb
+ size 83368254
llm.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:60f51526cf2f6011c20aeeeb1253b22432b0bd022e61c87202a0cc4f0799173d
- size 2023319048
+ oid sha256:1bc7ff1b5379c592982455777c1ac823c3e6ddefb53e2f0143d352c338f6de7e
+ size 2567851323
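The .pt files whose LFS pointers changed above are plain PyTorch checkpoints for the modules defined in cosyvoice2.yaml (llm.pt for the speech LM, hift.pt for the HiFT vocoder). A minimal sketch of peeking at one, assuming the LFS objects have been pulled locally; the exact key layout inside the checkpoint is not guaranteed:

```python
# Hedged sketch: inspect a pulled checkpoint. File name follows this repo;
# the contents are assumed to be a state dict of tensors.
import torch

state = torch.load("llm.pt", map_location="cpu")
n_params = sum(v.numel() for v in state.values() if hasattr(v, "numel"))
print(f"{len(state)} entries, {n_params / 1e6:.1f}M parameters")
```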