Upload 25 files
- .gitattributes +5 -0
- preprocessors/.gitattributes +40 -0
- preprocessors/README.md +3 -0
- preprocessors/dereverb_mel_band_roformer/dereverb_mel_band_roformer_anvuew.yaml +76 -0
- preprocessors/dereverb_mel_band_roformer/dereverb_mel_band_roformer_anvuew_sdr_19.1729.ckpt +3 -0
- preprocessors/mel-band-roformer-karaoke/config_karaoke_becruily.yaml +72 -0
- preprocessors/mel-band-roformer-karaoke/mel_band_roformer_karaoke_becruily.ckpt +3 -0
- preprocessors/parakeet-tdt-0.6b-v2/parakeet-tdt-0.6b-v2.nemo +3 -0
- preprocessors/rmvpe/rmvpe.pt +3 -0
- preprocessors/rosvot/rmvpe/model.pt +3 -0
- preprocessors/rosvot/rosvot/config.yaml +159 -0
- preprocessors/rosvot/rosvot/model.pt +3 -0
- preprocessors/rosvot/rwbd/config.yaml +171 -0
- preprocessors/rosvot/rwbd/model.pt +3 -0
- preprocessors/speech_seaco_paraformer_large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/README.md +357 -0
- preprocessors/speech_seaco_paraformer_large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/am.mvn +8 -0
- preprocessors/speech_seaco_paraformer_large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/asr_example_hotword.wav +3 -0
- preprocessors/speech_seaco_paraformer_large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/config.yaml +160 -0
- preprocessors/speech_seaco_paraformer_large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/configuration.json +14 -0
- preprocessors/speech_seaco_paraformer_large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/example/asr_example.wav +3 -0
- preprocessors/speech_seaco_paraformer_large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/example/hotword.txt +1 -0
- preprocessors/speech_seaco_paraformer_large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/fig/res.png +3 -0
- preprocessors/speech_seaco_paraformer_large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/fig/seaco.png +3 -0
- preprocessors/speech_seaco_paraformer_large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/model.pt +3 -0
- preprocessors/speech_seaco_paraformer_large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/seg_dict +0 -0
- preprocessors/speech_seaco_paraformer_large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/tokens.json +0 -0
.gitattributes
CHANGED
@@ -33,3 +33,8 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+preprocessors/parakeet-tdt-0.6b-v2/parakeet-tdt-0.6b-v2.nemo filter=lfs diff=lfs merge=lfs -text
+preprocessors/speech_seaco_paraformer_large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/asr_example_hotword.wav filter=lfs diff=lfs merge=lfs -text
+preprocessors/speech_seaco_paraformer_large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/example/asr_example.wav filter=lfs diff=lfs merge=lfs -text
+preprocessors/speech_seaco_paraformer_large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/fig/res.png filter=lfs diff=lfs merge=lfs -text
+preprocessors/speech_seaco_paraformer_large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/fig/seaco.png filter=lfs diff=lfs merge=lfs -text
preprocessors/.gitattributes
ADDED
@@ -0,0 +1,40 @@
*.7z filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
*.ckpt filter=lfs diff=lfs merge=lfs -text
*.ftz filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.h5 filter=lfs diff=lfs merge=lfs -text
*.joblib filter=lfs diff=lfs merge=lfs -text
*.lfs.* filter=lfs diff=lfs merge=lfs -text
*.mlmodel filter=lfs diff=lfs merge=lfs -text
*.model filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
*.npy filter=lfs diff=lfs merge=lfs -text
*.npz filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text
*.parquet filter=lfs diff=lfs merge=lfs -text
*.pb filter=lfs diff=lfs merge=lfs -text
*.pickle filter=lfs diff=lfs merge=lfs -text
*.pkl filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
*.pth filter=lfs diff=lfs merge=lfs -text
*.rar filter=lfs diff=lfs merge=lfs -text
*.safetensors filter=lfs diff=lfs merge=lfs -text
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.tar.* filter=lfs diff=lfs merge=lfs -text
*.tar filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
*.wasm filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
parakeet-tdt-0.6b-v2/parakeet-tdt-0.6b-v2.nemo filter=lfs diff=lfs merge=lfs -text
speech_seaco_paraformer_large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/asr_example_hotword.wav filter=lfs diff=lfs merge=lfs -text
speech_seaco_paraformer_large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/example/asr_example.wav filter=lfs diff=lfs merge=lfs -text
speech_seaco_paraformer_large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/fig/res.png filter=lfs diff=lfs merge=lfs -text
speech_seaco_paraformer_large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/fig/seaco.png filter=lfs diff=lfs merge=lfs -text
preprocessors/README.md
ADDED
@@ -0,0 +1,3 @@
---
license: apache-2.0
---
preprocessors/dereverb_mel_band_roformer/dereverb_mel_band_roformer_anvuew.yaml
ADDED
@@ -0,0 +1,76 @@
audio:
  chunk_size: 352800
  dim_f: 1024
  dim_t: 256
  hop_length: 441
  n_fft: 2048
  num_channels: 2
  sample_rate: 44100
  min_mean_abs: 0.000

model:
  dim: 384
  depth: 6
  stereo: true
  num_stems: 1
  time_transformer_depth: 1
  freq_transformer_depth: 1
  num_bands: 60
  dim_head: 64
  heads: 8
  attn_dropout: 0
  ff_dropout: 0
  flash_attn: True
  dim_freqs_in: 1025
  sample_rate: 44100  # needed for mel filter bank from librosa
  stft_n_fft: 2048
  stft_hop_length: 441
  stft_win_length: 2048
  stft_normalized: False
  mask_estimator_depth: 2
  multi_stft_resolution_loss_weight: 1.0
  multi_stft_resolutions_window_sizes: !!python/tuple
  - 4096
  - 2048
  - 1024
  - 512
  - 256
  multi_stft_hop_size: 147
  multi_stft_normalized: False

training:
  batch_size: 3
  gradient_accumulation_steps: 1
  grad_clip: 0
  instruments:
  - noreverb
  - reverb
  lr: 5.0e-05
  patience: 2
  reduce_factor: 0.95
  target_instrument: noreverb
  num_epochs: 1000
  num_steps: 4000
  q: 0.95
  coarse_loss_clip: false
  ema_momentum: 0.999
  optimizer: adamw
  other_fix: true  # it's needed for checking on multisong dataset if other is actually instrumental
  use_amp: true  # enable or disable usage of mixed precision (float16) - usually it must be true

augmentations:
  enable: true  # enable or disable all augmentations (to fast disable if needed)
  loudness: true  # randomly change loudness of each stem on the range (loudness_min; loudness_max)
  loudness_min: 0.1
  loudness_max: 1.0
  mixup: false  # mix several stems of same type with some probability (only works for dataset types: 1, 2, 3)
  mixup_probs: !!python/tuple  # 2 additional stems of the same type (1st with prob 0.2, 2nd with prob 0.02)
  - 0.2
  - 0.02
  mixup_loudness_min: 0.5
  mixup_loudness_max: 1.5

inference:
  batch_size: 8
  dim_t: 801
  num_overlap: 2
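A minimal loading sketch (an assumption about downstream usage, not code shipped in this repo): the config above uses PyYAML's `!!python/tuple` tag, which `yaml.safe_load` rejects, so a loader that understands the python tags is needed.

```python
import yaml

# Hypothetical loading snippet: FullLoader (or yaml.unsafe_load) is needed because
# of the !!python/tuple entries; safe_load raises a ConstructorError on them.
with open("preprocessors/dereverb_mel_band_roformer/dereverb_mel_band_roformer_anvuew.yaml") as f:
    cfg = yaml.load(f, Loader=yaml.FullLoader)

print(cfg["audio"]["chunk_size"])                           # 352800 samples (8 s at 44.1 kHz)
print(cfg["model"]["multi_stft_resolutions_window_sizes"])  # (4096, 2048, 1024, 512, 256)
```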
preprocessors/dereverb_mel_band_roformer/dereverb_mel_band_roformer_anvuew_sdr_19.1729.ckpt
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:9262877b87e9ebb0fb808a456b0a411fa677f5df31c8383c1254af531c078970
size 913107578
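The .ckpt entry above (like the other large files in this commit) is a Git LFS pointer rather than the weights themselves. A small, generic sketch (not part of this repo's tooling) for checking a downloaded blob against the pointer's oid and size:

```python
import hashlib

def verify_lfs_pointer(pointer_path: str, blob_path: str) -> bool:
    # Parse the three "key value" lines of the LFS pointer file.
    meta = dict(line.split(" ", 1) for line in open(pointer_path).read().splitlines() if line)
    expected_oid = meta["oid"].split(":", 1)[1]   # strip the "sha256:" prefix
    expected_size = int(meta["size"])

    h, size = hashlib.sha256(), 0
    with open(blob_path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)
            size += len(chunk)
    return h.hexdigest() == expected_oid and size == expected_size
```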
preprocessors/mel-band-roformer-karaoke/config_karaoke_becruily.yaml
ADDED
@@ -0,0 +1,72 @@
audio:
  chunk_size: 485100
  dim_f: 1024
  dim_t: 256
  hop_length: 441
  n_fft: 2048
  num_channels: 2
  sample_rate: 44100
  min_mean_abs: 0.000

model:
  dim: 384
  depth: 6
  stereo: true
  num_stems: 2
  time_transformer_depth: 1
  freq_transformer_depth: 1
  num_bands: 60
  dim_head: 64
  heads: 8
  attn_dropout: 0
  ff_dropout: 0
  flash_attn: true
  dim_freqs_in: 1025
  sample_rate: 44100  # needed for mel filter bank from librosa
  stft_n_fft: 2048
  stft_hop_length: 441
  stft_win_length: 2048
  stft_normalized: false
  mask_estimator_depth: 2
  multi_stft_resolution_loss_weight: 1.0
  multi_stft_resolutions_window_sizes: !!python/tuple
  - 4096
  - 2048
  - 1024
  - 512
  - 256
  multi_stft_hop_size: 147
  multi_stft_normalized: false

training:
  batch_size: 1
  gradient_accumulation_steps: 1
  grad_clip: 0
  instruments:
  - Vocals
  - Instrumental
  lr: 0.0005
  patience: 2
  reduce_factor: 0.95
  target_instrument: null
  num_epochs: 1000
  num_steps: 1000
  augmentation: false  # enable augmentations by audiomentations and pedalboard
  augmentation_type:
  use_mp3_compress: false  # Deprecated
  augmentation_mix: false  # Mix several stems of the same type with some probability
  augmentation_loudness: false  # randomly change loudness of each stem
  augmentation_loudness_type: 1  # Type 1 or 2
  augmentation_loudness_min: 0
  augmentation_loudness_max: 0
  q: 0.95
  coarse_loss_clip: false
  ema_momentum: 0.999
  optimizer: adamw
  other_fix: false  # it's needed for checking on multisong dataset if other is actually instrumental
  use_amp: true  # enable or disable usage of mixed precision (float16) - usually it must be true

inference:
  batch_size: 8
  dim_t: 1101
  num_overlap: 2
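A back-of-the-envelope sketch (an assumption about how chunked inference is typically driven by these fields, not code from this repo): the karaoke model sees windows of `audio.chunk_size` samples, and `inference.num_overlap` controls how densely those windows overlap.

```python
# Illustrative arithmetic only; the window/hop convention is an assumption.
sample_rate = 44100
chunk_size = 485100   # audio.chunk_size
num_overlap = 2       # inference.num_overlap

chunk_seconds = chunk_size / sample_rate   # 11.0 s per window
hop = chunk_size // num_overlap            # 242550 samples between window starts
print(chunk_seconds, hop)
```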
preprocessors/mel-band-roformer-karaoke/mel_band_roformer_karaoke_becruily.ckpt
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:d3aa262ac01df870b9fc033e9c7b6cad33fe04fc9c148b6c40841326a515a0e0
size 1719139254
preprocessors/parakeet-tdt-0.6b-v2/parakeet-tdt-0.6b-v2.nemo
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:d99e39955c9d3d0350d8fb7c75e40c64a2b2eaeb003883d7c941fd2e8747b28c
size 2472222720
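A hedged usage sketch for the .nemo archive above (assumes NVIDIA NeMo is installed; `example.wav` is a placeholder path, and this is the generic NeMo ASR API rather than anything defined in this repo):

```python
import nemo.collections.asr as nemo_asr

# Restore the packaged model directly from the .nemo archive and transcribe a file.
asr_model = nemo_asr.models.ASRModel.restore_from(
    "preprocessors/parakeet-tdt-0.6b-v2/parakeet-tdt-0.6b-v2.nemo"
)
print(asr_model.transcribe(["example.wav"]))  # placeholder input path
```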
preprocessors/rmvpe/rmvpe.pt
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:6d62215f4306e3ca278246188607209f09af3dc77ed4232efdd069798c4ec193
size 181184272
preprocessors/rosvot/rmvpe/model.pt
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:19dc1809cf4cdb0a18db93441816bc327e14e5644b72eeaae5220560c6736fe2
size 368492925
preprocessors/rosvot/rosvot/config.yaml
ADDED
@@ -0,0 +1,159 @@
accumulate_grad_batches: 1
amp: false
audio_num_mel_bins: 80
audio_sample_rate: 24000
base_config:
- ./base.yaml
binarization_args:
  min_sil_duration: 0.1
  shuffle: false
  test_range:
  - 0
  - 100
  train_range:
  - 200
  - -1
  trim_eos_bos: false
  valid_range:
  - 100
  - 200
  with_align: true
  with_f0: true
  with_f0cwt: false
  with_linear: false
  with_mel: false
  with_spk_embed: false
  with_w2v2_feat: false
  with_wav: true
binarizer_cls: data_gen.rosvot_binarizer.ROSVOTBinarizer
binary_data_dir: data/binary/m4
bkb_layers: 2
bkb_net: conformer
channel_multiples: 1-1-1-1
check_val_every_n_epoch: 10
clip_grad_norm: 1
clip_grad_value: 0
conformer_kernel: 9
dataset_downsample_rate: 1.0
debug: false
dropout: 0.1
ds_names: m4
ds_names_in_testing: ''
ds_names_in_training: ''
ds_workers: 8
endless_ds: true
eval_max_batches: -1
f0_add_noise: gaussian:0.04
f0_bin: 512
f0_filepath: ''
f0_max: 12000
f0_min: 30
fft_size: 512
find_unused_parameters: true
fmax: 12000
fmin: 30
frames_multiple: 16
gen_dir_name: ''
hidden_size: 256
hop_size: 128
infer_meta_path: ''
infer_print_skipped: true
infer_regulate_real_note_itv: true
input_process_name: none
label_pos_weight_decay: 0.95
lambda_note_bd: 1.0
lambda_note_bd_focal: 3.0
lambda_note_bd_slur_punish: 0.0
lambda_note_pitch: 1.0
load_ckpt: ''
loud_norm: false
lr: 1.0e-05
max_epochs: 1000
max_frames: 4000
max_input_tokens: 1550
max_sentences: 32
max_tokens: 60000
max_updates: 60000
max_valid_sentences: 1
max_valid_tokens: 60000
mel_add_noise: gaussian:0.05
mel_vmax: 1.5
mel_vmin: -6
min_frames: 0
min_word_dur: 20
model: rosvot
noise_in_test: false
noise_prob: 0.8
noise_snr: 6-20
note_bd_add_noise: gaussian:0.002
note_bd_focal_loss: 5.0
note_bd_min_gap: 90
note_bd_ratio: 2.42312
note_bd_ref_min_gap: 40
note_bd_start: 0
note_bd_temperature: 0.2
note_bd_threshold: 0.8
note_num: 85
note_pitch_label_smoothing: 0.005
note_pitch_start: 0
note_pitch_temperature: 0.01
note_start: 30
note_type_num: 5
num_ckpt_keep: 3
num_sanity_val_steps: 5
num_valid_plots: 10
num_valid_stats: 100
optimizer_adam_beta1: 0.9
optimizer_adam_beta2: 0.98
out_wav_norm: false
pe: rmvpe
pe_ckpt: pretrained_models/rosvot/rmvpe/model.pt
pin_memory: true
pitch_attn_num_head: 4
pitch_type: frame
print_nan_grads: false
processed_data_dir: data/processed/m4
profile_infer: false
raw_data_dir: ''
rename_tmux: false
resume_from_checkpoint: 0
save_best: true
save_codes:
- modules
- research
save_f0: false
save_gt: true
save_plot: true
scheduler: step_lr
scheduler_lr_gamma: 0.998
scheduler_lr_step_size: 500
seed: 42
soft_note_bd_func: gaussian:80
sort_by_len: true
task_cls: tasks.rosvot.task.MidiExtractorTask
tb_log_interval: 100
test_ids: []
test_input_yaml: ''
test_set_name: test
train_set_name: train
train_sets: ''
unet_skip_layer: false
updown_rates: 2-2-2-2
use_mel: true
use_mel_bins: 40
use_pitch_embed: true
use_soft_note: false
use_soft_note_bd: true
use_spk_embed: false
use_spk_id: false
use_wav: false
use_word_input: false
val_check_interval: 1000
valid_infer_interval: 10000
valid_monitor_key: val_loss
valid_monitor_mode: min
valid_set_name: valid
warmup_updates: 0
weight_decay: 0
win_size: 512
work_dir: checkpoints/rosvot
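A minimal illustration (not from the ROSVOT codebase) of a few quantities that follow directly from this config's audio settings:

```python
import yaml

# Load the flat ROSVOT config; it contains only plain YAML, so safe_load works.
with open("preprocessors/rosvot/rosvot/config.yaml") as f:
    cfg = yaml.safe_load(f)

frame_rate = cfg["audio_sample_rate"] / cfg["hop_size"]  # 24000 / 128 = 187.5 frames/s
frame_ms = 1000.0 / frame_rate                           # ~5.33 ms per frame
print(frame_rate, frame_ms, cfg["use_mel_bins"])         # 187.5 5.33... 40
```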
preprocessors/rosvot/rosvot/model.pt
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:7501fb5f913d971c2f51bcb3063b930027b03206581820a4d2bfdc394c9c3fcb
size 144674420
preprocessors/rosvot/rwbd/config.yaml
ADDED
@@ -0,0 +1,171 @@
accumulate_grad_batches: 1
amp: false
audio_num_mel_bins: 80
audio_sample_rate: 24000
base_config:
- ./base.yaml
- ./rosvot.yaml
binarization_args:
  min_sil_duration: 0.1
  shuffle: false
  test_range:
  - 0
  - 100
  train_range:
  - 200
  - -1
  trim_eos_bos: false
  valid_range:
  - 100
  - 200
  with_align: true
  with_f0: true
  with_f0cwt: false
  with_linear: false
  with_mel: false
  with_spk_embed: false
  with_w2v2_feat: false
  with_wav: true
binarizer_cls: data_gen.rosvot_binarizer.RosvotBinarizer
binary_data_dir: data/binary/m4
bkb_layers: 2
bkb_net: conformer
channel_multiples: 1-1-1
check_val_every_n_epoch: 10
clip_grad_norm: 1
clip_grad_value: 0
conformer_kernel: 9
dataset_downsample_rate: 1.0
debug: false
dropout: 0.1
ds_names: m4
ds_names_in_testing: ''
ds_names_in_training: ''
ds_workers: 8
endless_ds: true
eval_max_batches: -1
f0_add_noise: gaussian:0.04
f0_bin: 512
f0_filepath: ''
f0_max: 900
f0_min: 50
fft_size: 512
find_unused_parameters: true
fmax: 12000
fmin: 30
frames_multiple: 8
gen_dir_name: ''
hidden_size: 256
hop_size: 128
infer_meta_path: ''
infer_print_skipped: true
infer_regulate_real_note_itv: true
input_process_name: none
label_pos_weight_decay: 0.95
lambda_note_bd: 1.0
lambda_note_bd_focal: 3.0
lambda_note_bd_slur_punish: 0.0
lambda_note_pitch: 1.0
lambda_word_bd: 1.0
lambda_word_bd_focal: 3.0
load_ckpt: ''
loud_norm: false
lr: 5.0e-06
max_epochs: 1000
max_frames: 4000
max_input_tokens: 1550
max_sentences: 128
max_tokens: 80000
max_updates: 40000
max_valid_sentences: 1
max_valid_tokens: 60000
mel_add_noise: gaussian:0.05
mel_vmax: 1.5
mel_vmin: -6
min_frames: 0
min_note_dur: 80
min_word_dur: 20
model: rosvot
noise_in_test: false
noise_prob: 0.8
noise_snr: 6-20
note_bd_add_noise: gaussian:0.002
note_bd_focal_loss: 5.0
note_bd_min_gap: 90
note_bd_ratio: 2.42312
note_bd_ref_min_gap: 40
note_bd_start: 0
note_bd_temperature: 0.2
note_bd_threshold: 0.8
note_num: 85
note_pitch_label_smoothing: 0.005
note_pitch_start: 0
note_pitch_temperature: 0.01
note_start: 30
num_ckpt_keep: 3
num_sanity_val_steps: 5
num_valid_plots: 10
num_valid_stats: 100
optimizer_adam_beta1: 0.9
optimizer_adam_beta2: 0.98
out_wav_norm: false
pe: rmvpe
pe_ckpt: checkpoints/rmvpe/model.pt
pin_memory: true
pitch_attn_num_head: 4
pitch_type: frame
print_nan_grads: false
processed_data_dir: data/processed/m4
profile_infer: false
raw_data_dir: ''
rename_tmux: false
resume_from_checkpoint: 0
save_best: true
save_codes:
- modules
- tasks
save_f0: false
save_gt: true
save_plot: true
scheduler: step_lr
scheduler_lr_gamma: 0.998
scheduler_lr_step_size: 500
seed: 42
soft_note_bd_func: gaussian:80
soft_word_bd_func: gaussian:80
sort_by_len: true
task_cls: tasks.rosvot.task.RobustWordbdTask
tb_log_interval: 100
test_ids: []
test_input_yaml: ''
test_set_name: test
train_set_name: train
train_sets: ''
unet_skip_layer: false
updown_rates: 2-2-2
use_mel: true
use_mel_bins: 40
use_pitch_embed: true
use_soft_note: false
use_soft_note_bd: true
use_soft_word_bd: true
use_spk_embed: false
use_spk_id: false
use_wav: false
use_word_input: false
val_check_interval: 500
valid_infer_interval: 10000
valid_monitor_key: val_loss
valid_monitor_mode: min
valid_set_name: valid
warmup_updates: 0
weight_decay: 0
win_size: 512
word_bd_add_noise: gaussian:0.002
word_bd_focal_loss: 5.0
word_bd_min_gap: 90
word_bd_ratio: 2.2
word_bd_start: 0
word_bd_temperature: 0.2
word_bd_threshold: 0.9
work_dir: checkpoints/240613-rwbd-03
preprocessors/rosvot/rwbd/model.pt
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:0bc2d42a6d4b7a05436deb937e2deda1c12de49e5687cfda0bdf6a430120dcd2
size 119897457
preprocessors/speech_seaco_paraformer_large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/README.md
ADDED
@@ -0,0 +1,357 @@
---
tasks:
- auto-speech-recognition
domain:
- audio
model-type:
- Non-autoregressive
frameworks:
- pytorch
backbone:
- transformer/conformer
metrics:
- CER
license: Apache License 2.0
language:
- cn
tags:
- FunASR
- Paraformer
- Alibaba
- ICASSP2024
- Hotword
datasets:
  train:
  - 50,000 hour industrial Mandarin task
  test:
  - AISHELL-1-hotword dev/test
indexing:
  results:
  - task:
      name: Automatic Speech Recognition
    dataset:
      name: 50,000 hour industrial Mandarin task
      type: audio # optional
      args: 16k sampling rate, 8404 characters # optional
    metrics:
      - type: CER
        value: 8.53% # float
        description: greedy search, without lm, avg.
        args: default
      - type: RTF
        value: 0.0251 # float
        description: GPU inference on V100
        args: batch_size=1
widgets:
  - task: auto-speech-recognition
    inputs:
      - type: audio
        name: input
        title: 音频
    parameters:
      - name: hotword
        title: 热词
        type: string
    examples:
      - name: 1
        title: 示例1
        inputs:
          - name: input
            data: git://example/asr_example.wav
        parameters:
          - name: hotword
            value: 魔搭
    model_revision: v2.0.4
    inferencespec:
      cpu: 8 # number of CPUs
      memory: 4096
---

# Paraformer-large Model Introduction

## Highlights
The hotword version of Paraformer-large supports hotword customization: given a user-provided hotword list, it boosts those entries during decoding, improving hotword recall and accuracy.


## <strong>[About the FunASR open-source project](https://github.com/alibaba-damo-academy/FunASR)</strong>
<strong>[FunASR](https://github.com/alibaba-damo-academy/FunASR)</strong> aims to build a bridge between academic research and industrial application of speech recognition. By releasing the training and fine-tuning recipes of industrial-grade speech recognition models, it lets researchers and developers study and productionize ASR models more conveniently and helps the speech recognition ecosystem grow. Making speech recognition fun!

[**GitHub repository**](https://github.com/alibaba-damo-academy/FunASR)
| [**What's new**](https://github.com/alibaba-damo-academy/FunASR#whats-new)
| [**Installation**](https://github.com/alibaba-damo-academy/FunASR#installation)
| [**Service deployment**](https://www.funasr.com)
| [**Model zoo**](https://github.com/alibaba-damo-academy/FunASR/tree/main/model_zoo)
| [**Contact us**](https://github.com/alibaba-damo-academy/FunASR#contact)


## How the model works

SeACoParaformer is a new-generation hotword-customizable, non-autoregressive speech recognition model from Alibaba's speech lab. Compared with the previous CLAS-based hotword customization scheme, SeACoParaformer decouples the hotword module from the ASR model and performs hotword boosting by fusing posterior probabilities, which makes the boosting process observable and controllable and significantly improves hotword recall.

<p align="center">
<img src="fig/seaco.png" alt="SeACoParaformer model structure" width="380" />


The model structure and training pipeline of SeACoParaformer are shown above. By introducing a bias encoder to extract hotword embeddings and a bias decoder for attention modeling, SeACoParaformer captures how the Predictor and Decoder outputs relate to the hotwords and predicts a hotword output that is synchronized with the ASR result. Hotword boosting is then achieved by fusing the posterior probabilities. Compared with ContextualParaformer, SeACoParaformer brings a clear improvement, as shown below:

<p align="center">
<img src="fig/res.png" alt="SeACoParaformer results" width="700" />

For more details, see:
- Paper: [SeACo-Paraformer: A Non-Autoregressive ASR System with Flexible and Effective Hotword Customization Ability](https://arxiv.org/abs/2308.03266)

## Reproducing the results in the paper
```python
from funasr import AutoModel

model = AutoModel(model="iic/speech_seaco_paraformer_large_asr_nat-zh-cn-16k-common-vocab8404-pytorch",
                  model_revision="v2.0.4",
                  # vad_model="damo/speech_fsmn_vad_zh-cn-16k-common-pytorch",
                  # vad_model_revision="v2.0.4",
                  # punc_model="damo/punc_ct-transformer_zh-cn-common-vocab272727-pytorch",
                  # punc_model_revision="v2.0.4",
                  # spk_model="damo/speech_campplus_sv_zh-cn_16k-common",
                  # spk_model_revision="v2.0.2",
                  device="cuda:0",
                  )

res = model.generate(input="YOUR_PATH/aishell1_hotword_dev.scp",
                     hotword='./data/dev/hotword.txt',
                     batch_size_s=300,
                     )
fout1 = open("dev.output", 'w')
for resi in res:
    fout1.write("{}\t{}\n".format(resi['key'], resi['text']))

res = model.generate(input="YOUR_PATH/aishell1_hotword_test.scp",
                     hotword='./data/test/hotword.txt',
                     batch_size_s=300,
                     )
fout2 = open("test.output", 'w')
for resi in res:
    fout2.write("{}\t{}\n".format(resi['key'], resi['text']))
```

## Inference with ModelScope

- Supported audio input formats:
  - Path to a wav file, e.g. data/test/audios/asr_example.wav
  - Path to a pcm file, e.g. data/test/audios/asr_example.pcm
  - URL of a wav file, e.g. https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_zh.wav
  - Raw wav data as bytes, e.g. bytes read directly from a file or recorded from a microphone.
  - Already-decoded audio, e.g. audio, rate = soundfile.read("asr_example_zh.wav"), of type numpy.ndarray or torch.Tensor.
  - A wav.scp file, which must follow this format:

```sh
cat wav.scp
asr_example1 data/test/audios/asr_example1.wav
asr_example2 data/test/audios/asr_example2.wav
...
```

- If the input is a wav file URL, the API can be called as follows:

```python
from modelscope.pipelines import pipeline
from modelscope.utils.constant import Tasks

inference_pipeline = pipeline(
    task=Tasks.auto_speech_recognition,
    model='iic/speech_seaco_paraformer_large_asr_nat-zh-cn-16k-common-vocab8404-pytorch', model_revision="v2.0.4")

rec_result = inference_pipeline('https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_zh.wav', hotword='达摩院 魔搭')
print(rec_result)
```

- If the input audio is in pcm format, pass the audio sampling rate when calling the API, e.g.:

```python
rec_result = inference_pipeline('https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_zh.pcm', fs=16000, hotword='达摩院 魔搭')
```

- If the input audio is a wav file:

```python
rec_result = inference_pipeline('asr_example_zh.wav', hotword='达摩院 魔搭')
```

- If the input is a wav.scp file (note: the file name must end in .scp), add the output_dir parameter to write the recognition results to files:

```python
inference_pipeline("wav.scp", output_dir='./output_dir', hotword='达摩院 魔搭')
```
The output directory is structured as follows:

```sh
tree output_dir/
output_dir/
└── 1best_recog
    ├── score
    └── text

1 directory, 3 files
```

score: score of the recognized path

text: speech recognition result file


- If the input is already-decoded audio samples:

```python
import soundfile

waveform, sample_rate = soundfile.read("asr_example_zh.wav")
rec_result = inference_pipeline(waveform, hotword='达摩院 魔搭')
```

- Combining ASR, VAD, and PUNC models freely

The VAD and punctuation (PUNC) models can be combined as needed:
```python
inference_pipeline = pipeline(
    task=Tasks.auto_speech_recognition,
    model='iic/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch', model_revision="v2.0.4",
    vad_model='iic/speech_fsmn_vad_zh-cn-16k-common-pytorch', vad_model_revision="v2.0.4",
    punc_model='iic/punc_ct-transformer_zh-cn-common-vocab272727-pytorch', punc_model_revision="v2.0.3",
    # spk_model="iic/speech_campplus_sv_zh-cn_16k-common",
    # spk_model_revision="v2.0.2",
)
```
If you do not need the PUNC model, set punc_model=None or simply omit the punc_model argument. To add an LM, pass lm_model='iic/speech_transformer_lm_zh-cn-common-vocab8404-pytorch' and set the lm_weight and beam_size parameters.
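A hedged illustration of that combination (the model IDs come from this card; the lm_weight and beam_size values below are placeholders, and exactly where those two parameters are passed may vary by FunASR/ModelScope version):

```python
from modelscope.pipelines import pipeline
from modelscope.utils.constant import Tasks

# Sketch: ASR without punctuation restoration, plus an external LM.
inference_pipeline = pipeline(
    task=Tasks.auto_speech_recognition,
    model='iic/speech_seaco_paraformer_large_asr_nat-zh-cn-16k-common-vocab8404-pytorch', model_revision="v2.0.4",
    punc_model=None,                                                      # skip punctuation restoration
    lm_model='iic/speech_transformer_lm_zh-cn-common-vocab8404-pytorch',  # external LM from this card
    lm_weight=0.15,  # placeholder value, not a recommended setting
    beam_size=10,    # placeholder value, not a recommended setting
)
```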
## Inference with FunASR

Below is a quick-start tutorial. Test audio: ([Chinese](https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/vad_example.wav), [English](https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_en.wav))

### Command line
Run in a terminal:

```shell
funasr +model=paraformer-zh +vad_model="fsmn-vad" +punc_model="ct-punc" +input=vad_example.wav
```

Note: both single audio files and file lists are supported; a list is a Kaldi-style wav.scp: `wav_id wav_path`

### Python examples
#### Speech recognition (non-streaming)
```python
from funasr import AutoModel
# paraformer-zh is a multi-functional asr model
# use vad, punc, spk or not as you need
model = AutoModel(model="paraformer-zh", model_revision="v2.0.4",
                  vad_model="fsmn-vad", vad_model_revision="v2.0.4",
                  punc_model="ct-punc-c", punc_model_revision="v2.0.4",
                  # spk_model="cam++", spk_model_revision="v2.0.2",
                  )
res = model.generate(input=f"{model.model_path}/example/asr_example.wav",
                     batch_size_s=300,
                     hotword='魔搭')
print(res)
```
Note: `model_hub` selects the model repository; `ms` downloads from ModelScope, `hf` from Hugging Face.

#### Speech recognition (streaming)

```python
from funasr import AutoModel

chunk_size = [0, 10, 5]  # [0, 10, 5] 600ms, [0, 8, 4] 480ms
encoder_chunk_look_back = 4  # number of chunks to look back for encoder self-attention
decoder_chunk_look_back = 1  # number of encoder chunks to look back for decoder cross-attention

model = AutoModel(model="paraformer-zh-streaming", model_revision="v2.0.4")

import soundfile
import os

wav_file = os.path.join(model.model_path, "example/asr_example.wav")
speech, sample_rate = soundfile.read(wav_file)
chunk_stride = chunk_size[1] * 960  # 600ms

cache = {}
total_chunk_num = int(len((speech)-1)/chunk_stride+1)
for i in range(total_chunk_num):
    speech_chunk = speech[i*chunk_stride:(i+1)*chunk_stride]
    is_final = i == total_chunk_num - 1
    res = model.generate(input=speech_chunk, cache=cache, is_final=is_final, chunk_size=chunk_size, encoder_chunk_look_back=encoder_chunk_look_back, decoder_chunk_look_back=decoder_chunk_look_back)
    print(res)
```

Note: `chunk_size` configures the streaming latency. `[0,10,5]` means text is emitted in real time at a granularity of `10*60=600ms`, with `5*60=300ms` of look-ahead. Each inference call consumes `600ms` of input (`16000*0.6=9600` samples) and outputs the corresponding text; for the final audio chunk, set `is_final=True` to force the last characters out.

#### Voice activity detection (non-streaming)
```python
from funasr import AutoModel

model = AutoModel(model="fsmn-vad", model_revision="v2.0.4")

wav_file = f"{model.model_path}/example/asr_example.wav"
res = model.generate(input=wav_file)
print(res)
```

#### Voice activity detection (streaming)
```python
from funasr import AutoModel

chunk_size = 200  # ms
model = AutoModel(model="fsmn-vad", model_revision="v2.0.4")

import soundfile

wav_file = f"{model.model_path}/example/vad_example.wav"
speech, sample_rate = soundfile.read(wav_file)
chunk_stride = int(chunk_size * sample_rate / 1000)

cache = {}
total_chunk_num = int(len((speech)-1)/chunk_stride+1)
for i in range(total_chunk_num):
    speech_chunk = speech[i*chunk_stride:(i+1)*chunk_stride]
    is_final = i == total_chunk_num - 1
    res = model.generate(input=speech_chunk, cache=cache, is_final=is_final, chunk_size=chunk_size)
    if len(res[0]["value"]):
        print(res)
```

#### Punctuation restoration
```python
from funasr import AutoModel

model = AutoModel(model="ct-punc", model_revision="v2.0.4")

res = model.generate(input="那今天的会就到这里吧 happy new year 明年见")
print(res)
```

#### Timestamp prediction
```python
from funasr import AutoModel

model = AutoModel(model="fa-zh", model_revision="v2.0.4")

wav_file = f"{model.model_path}/example/asr_example.wav"
text_file = f"{model.model_path}/example/text.txt"
res = model.generate(input=(wav_file, text_file), data_type=("sound", "text"))
print(res)
```

More detailed usage ([examples](https://github.com/alibaba-damo-academy/FunASR/tree/main/examples/industrial_data_pretraining))


## Fine-tuning

Detailed usage ([examples](https://github.com/alibaba-damo-academy/FunASR/tree/main/examples/industrial_data_pretraining))


## Related papers and citation

```BibTeX
@article{shi2023seaco,
  title={SeACo-Paraformer: A Non-Autoregressive ASR System with Flexible and Effective Hotword Customization Ability},
  author={Shi, Xian and Yang, Yexin and Li, Zerui and Zhang, Shiliang},
  journal={arXiv preprint arXiv:2308.03266 (accepted by ICASSP2024)},
  year={2023}
}
```
preprocessors/speech_seaco_paraformer_large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/am.mvn
ADDED
@@ -0,0 +1,8 @@
<Nnet>
<Splice> 560 560
[ 0 ]
<AddShift> 560 560
<LearnRateCoef> 0 [ -8.311879 -8.600912 -9.615928 -10.43595 -11.21292 -11.88333 -12.36243 -12.63706 -12.8818 -12.83066 -12.89103 -12.95666 -13.19763 -13.40598 -13.49113 -13.5546 -13.55639 -13.51915 -13.68284 -13.53289 -13.42107 -13.65519 -13.50713 -13.75251 -13.76715 -13.87408 -13.73109 -13.70412 -13.56073 -13.53488 -13.54895 -13.56228 -13.59408 -13.62047 -13.64198 -13.66109 -13.62669 -13.58297 -13.57387 -13.4739 -13.53063 -13.48348 -13.61047 -13.64716 -13.71546 -13.79184 -13.90614 -14.03098 -14.18205 -14.35881 -14.48419 -14.60172 -14.70591 -14.83362 -14.92122 -15.00622 -15.05122 -15.03119 -14.99028 -14.92302 -14.86927 -14.82691 -14.7972 -14.76909 -14.71356 -14.61277 -14.51696 -14.42252 -14.36405 -14.30451 -14.23161 -14.19851 -14.16633 -14.15649 -14.10504 -13.99518 -13.79562 -13.3996 -12.7767 -11.71208 -8.311879 -8.600912 -9.615928 -10.43595 -11.21292 -11.88333 -12.36243 -12.63706 -12.8818 -12.83066 -12.89103 -12.95666 -13.19763 -13.40598 -13.49113 -13.5546 -13.55639 -13.51915 -13.68284 -13.53289 -13.42107 -13.65519 -13.50713 -13.75251 -13.76715 -13.87408 -13.73109 -13.70412 -13.56073 -13.53488 -13.54895 -13.56228 -13.59408 -13.62047 -13.64198 -13.66109 -13.62669 -13.58297 -13.57387 -13.4739 -13.53063 -13.48348 -13.61047 -13.64716 -13.71546 -13.79184 -13.90614 -14.03098 -14.18205 -14.35881 -14.48419 -14.60172 -14.70591 -14.83362 -14.92122 -15.00622 -15.05122 -15.03119 -14.99028 -14.92302 -14.86927 -14.82691 -14.7972 -14.76909 -14.71356 -14.61277 -14.51696 -14.42252 -14.36405 -14.30451 -14.23161 -14.19851 -14.16633 -14.15649 -14.10504 -13.99518 -13.79562 -13.3996 -12.7767 -11.71208 -8.311879 -8.600912 -9.615928 -10.43595 -11.21292 -11.88333 -12.36243 -12.63706 -12.8818 -12.83066 -12.89103 -12.95666 -13.19763 -13.40598 -13.49113 -13.5546 -13.55639 -13.51915 -13.68284 -13.53289 -13.42107 -13.65519 -13.50713 -13.75251 -13.76715 -13.87408 -13.73109 -13.70412 -13.56073 -13.53488 -13.54895 -13.56228 -13.59408 -13.62047 -13.64198 -13.66109 -13.62669 -13.58297 -13.57387 -13.4739 -13.53063 -13.48348 -13.61047 -13.64716 -13.71546 -13.79184 -13.90614 -14.03098 -14.18205 -14.35881 -14.48419 -14.60172 -14.70591 -14.83362 -14.92122 -15.00622 -15.05122 -15.03119 -14.99028 -14.92302 -14.86927 -14.82691 -14.7972 -14.76909 -14.71356 -14.61277 -14.51696 -14.42252 -14.36405 -14.30451 -14.23161 -14.19851 -14.16633 -14.15649 -14.10504 -13.99518 -13.79562 -13.3996 -12.7767 -11.71208 -8.311879 -8.600912 -9.615928 -10.43595 -11.21292 -11.88333 -12.36243 -12.63706 -12.8818 -12.83066 -12.89103 -12.95666 -13.19763 -13.40598 -13.49113 -13.5546 -13.55639 -13.51915 -13.68284 -13.53289 -13.42107 -13.65519 -13.50713 -13.75251 -13.76715 -13.87408 -13.73109 -13.70412 -13.56073 -13.53488 -13.54895 -13.56228 -13.59408 -13.62047 -13.64198 -13.66109 -13.62669 -13.58297 -13.57387 -13.4739 -13.53063 -13.48348 -13.61047 -13.64716 -13.71546 -13.79184 -13.90614 -14.03098 -14.18205 -14.35881 -14.48419 -14.60172 -14.70591 -14.83362 -14.92122 -15.00622 -15.05122 -15.03119 -14.99028 -14.92302 -14.86927 -14.82691 -14.7972 -14.76909 -14.71356 -14.61277 -14.51696 -14.42252 -14.36405 -14.30451 -14.23161 -14.19851 -14.16633 -14.15649 -14.10504 -13.99518 -13.79562 -13.3996 -12.7767 -11.71208 -8.311879 -8.600912 -9.615928 -10.43595 -11.21292 -11.88333 -12.36243 -12.63706 -12.8818 -12.83066 -12.89103 -12.95666 -13.19763 -13.40598 -13.49113 -13.5546 -13.55639 -13.51915 -13.68284 -13.53289 -13.42107 -13.65519 -13.50713 -13.75251 -13.76715 -13.87408 -13.73109 -13.70412 -13.56073 -13.53488 -13.54895 -13.56228 -13.59408 -13.62047 -13.64198 -13.66109 
-13.62669 -13.58297 -13.57387 -13.4739 -13.53063 -13.48348 -13.61047 -13.64716 -13.71546 -13.79184 -13.90614 -14.03098 -14.18205 -14.35881 -14.48419 -14.60172 -14.70591 -14.83362 -14.92122 -15.00622 -15.05122 -15.03119 -14.99028 -14.92302 -14.86927 -14.82691 -14.7972 -14.76909 -14.71356 -14.61277 -14.51696 -14.42252 -14.36405 -14.30451 -14.23161 -14.19851 -14.16633 -14.15649 -14.10504 -13.99518 -13.79562 -13.3996 -12.7767 -11.71208 -8.311879 -8.600912 -9.615928 -10.43595 -11.21292 -11.88333 -12.36243 -12.63706 -12.8818 -12.83066 -12.89103 -12.95666 -13.19763 -13.40598 -13.49113 -13.5546 -13.55639 -13.51915 -13.68284 -13.53289 -13.42107 -13.65519 -13.50713 -13.75251 -13.76715 -13.87408 -13.73109 -13.70412 -13.56073 -13.53488 -13.54895 -13.56228 -13.59408 -13.62047 -13.64198 -13.66109 -13.62669 -13.58297 -13.57387 -13.4739 -13.53063 -13.48348 -13.61047 -13.64716 -13.71546 -13.79184 -13.90614 -14.03098 -14.18205 -14.35881 -14.48419 -14.60172 -14.70591 -14.83362 -14.92122 -15.00622 -15.05122 -15.03119 -14.99028 -14.92302 -14.86927 -14.82691 -14.7972 -14.76909 -14.71356 -14.61277 -14.51696 -14.42252 -14.36405 -14.30451 -14.23161 -14.19851 -14.16633 -14.15649 -14.10504 -13.99518 -13.79562 -13.3996 -12.7767 -11.71208 -8.311879 -8.600912 -9.615928 -10.43595 -11.21292 -11.88333 -12.36243 -12.63706 -12.8818 -12.83066 -12.89103 -12.95666 -13.19763 -13.40598 -13.49113 -13.5546 -13.55639 -13.51915 -13.68284 -13.53289 -13.42107 -13.65519 -13.50713 -13.75251 -13.76715 -13.87408 -13.73109 -13.70412 -13.56073 -13.53488 -13.54895 -13.56228 -13.59408 -13.62047 -13.64198 -13.66109 -13.62669 -13.58297 -13.57387 -13.4739 -13.53063 -13.48348 -13.61047 -13.64716 -13.71546 -13.79184 -13.90614 -14.03098 -14.18205 -14.35881 -14.48419 -14.60172 -14.70591 -14.83362 -14.92122 -15.00622 -15.05122 -15.03119 -14.99028 -14.92302 -14.86927 -14.82691 -14.7972 -14.76909 -14.71356 -14.61277 -14.51696 -14.42252 -14.36405 -14.30451 -14.23161 -14.19851 -14.16633 -14.15649 -14.10504 -13.99518 -13.79562 -13.3996 -12.7767 -11.71208 ]
<Rescale> 560 560
<LearnRateCoef> 0 [ 0.155775 0.154484 0.1527379 0.1518718 0.1506028 0.1489256 0.147067 0.1447061 0.1436307 0.1443568 0.1451849 0.1455157 0.1452821 0.1445717 0.1439195 0.1435867 0.1436018 0.1438781 0.1442086 0.1448844 0.1454756 0.145663 0.146268 0.1467386 0.1472724 0.147664 0.1480913 0.1483739 0.1488841 0.1493636 0.1497088 0.1500379 0.1502916 0.1505389 0.1506787 0.1507102 0.1505992 0.1505445 0.1505938 0.1508133 0.1509569 0.1512396 0.1514625 0.1516195 0.1516156 0.1515561 0.1514966 0.1513976 0.1512612 0.151076 0.1510596 0.1510431 0.151077 0.1511168 0.1511917 0.151023 0.1508045 0.1505885 0.1503493 0.1502373 0.1501726 0.1500762 0.1500065 0.1499782 0.150057 0.1502658 0.150469 0.1505335 0.1505505 0.1505328 0.1504275 0.1502438 0.1499674 0.1497118 0.1494661 0.1493102 0.1493681 0.1495501 0.1499738 0.1509654 0.155775 0.154484 0.1527379 0.1518718 0.1506028 0.1489256 0.147067 0.1447061 0.1436307 0.1443568 0.1451849 0.1455157 0.1452821 0.1445717 0.1439195 0.1435867 0.1436018 0.1438781 0.1442086 0.1448844 0.1454756 0.145663 0.146268 0.1467386 0.1472724 0.147664 0.1480913 0.1483739 0.1488841 0.1493636 0.1497088 0.1500379 0.1502916 0.1505389 0.1506787 0.1507102 0.1505992 0.1505445 0.1505938 0.1508133 0.1509569 0.1512396 0.1514625 0.1516195 0.1516156 0.1515561 0.1514966 0.1513976 0.1512612 0.151076 0.1510596 0.1510431 0.151077 0.1511168 0.1511917 0.151023 0.1508045 0.1505885 0.1503493 0.1502373 0.1501726 0.1500762 0.1500065 0.1499782 0.150057 0.1502658 0.150469 0.1505335 0.1505505 0.1505328 0.1504275 0.1502438 0.1499674 0.1497118 0.1494661 0.1493102 0.1493681 0.1495501 0.1499738 0.1509654 0.155775 0.154484 0.1527379 0.1518718 0.1506028 0.1489256 0.147067 0.1447061 0.1436307 0.1443568 0.1451849 0.1455157 0.1452821 0.1445717 0.1439195 0.1435867 0.1436018 0.1438781 0.1442086 0.1448844 0.1454756 0.145663 0.146268 0.1467386 0.1472724 0.147664 0.1480913 0.1483739 0.1488841 0.1493636 0.1497088 0.1500379 0.1502916 0.1505389 0.1506787 0.1507102 0.1505992 0.1505445 0.1505938 0.1508133 0.1509569 0.1512396 0.1514625 0.1516195 0.1516156 0.1515561 0.1514966 0.1513976 0.1512612 0.151076 0.1510596 0.1510431 0.151077 0.1511168 0.1511917 0.151023 0.1508045 0.1505885 0.1503493 0.1502373 0.1501726 0.1500762 0.1500065 0.1499782 0.150057 0.1502658 0.150469 0.1505335 0.1505505 0.1505328 0.1504275 0.1502438 0.1499674 0.1497118 0.1494661 0.1493102 0.1493681 0.1495501 0.1499738 0.1509654 0.155775 0.154484 0.1527379 0.1518718 0.1506028 0.1489256 0.147067 0.1447061 0.1436307 0.1443568 0.1451849 0.1455157 0.1452821 0.1445717 0.1439195 0.1435867 0.1436018 0.1438781 0.1442086 0.1448844 0.1454756 0.145663 0.146268 0.1467386 0.1472724 0.147664 0.1480913 0.1483739 0.1488841 0.1493636 0.1497088 0.1500379 0.1502916 0.1505389 0.1506787 0.1507102 0.1505992 0.1505445 0.1505938 0.1508133 0.1509569 0.1512396 0.1514625 0.1516195 0.1516156 0.1515561 0.1514966 0.1513976 0.1512612 0.151076 0.1510596 0.1510431 0.151077 0.1511168 0.1511917 0.151023 0.1508045 0.1505885 0.1503493 0.1502373 0.1501726 0.1500762 0.1500065 0.1499782 0.150057 0.1502658 0.150469 0.1505335 0.1505505 0.1505328 0.1504275 0.1502438 0.1499674 0.1497118 0.1494661 0.1493102 0.1493681 0.1495501 0.1499738 0.1509654 0.155775 0.154484 0.1527379 0.1518718 0.1506028 0.1489256 0.147067 0.1447061 0.1436307 0.1443568 0.1451849 0.1455157 0.1452821 0.1445717 0.1439195 0.1435867 0.1436018 0.1438781 0.1442086 0.1448844 0.1454756 0.145663 0.146268 0.1467386 0.1472724 0.147664 0.1480913 0.1483739 0.1488841 0.1493636 0.1497088 0.1500379 0.1502916 0.1505389 0.1506787 0.1507102 0.1505992 0.1505445 
0.1505938 0.1508133 0.1509569 0.1512396 0.1514625 0.1516195 0.1516156 0.1515561 0.1514966 0.1513976 0.1512612 0.151076 0.1510596 0.1510431 0.151077 0.1511168 0.1511917 0.151023 0.1508045 0.1505885 0.1503493 0.1502373 0.1501726 0.1500762 0.1500065 0.1499782 0.150057 0.1502658 0.150469 0.1505335 0.1505505 0.1505328 0.1504275 0.1502438 0.1499674 0.1497118 0.1494661 0.1493102 0.1493681 0.1495501 0.1499738 0.1509654 0.155775 0.154484 0.1527379 0.1518718 0.1506028 0.1489256 0.147067 0.1447061 0.1436307 0.1443568 0.1451849 0.1455157 0.1452821 0.1445717 0.1439195 0.1435867 0.1436018 0.1438781 0.1442086 0.1448844 0.1454756 0.145663 0.146268 0.1467386 0.1472724 0.147664 0.1480913 0.1483739 0.1488841 0.1493636 0.1497088 0.1500379 0.1502916 0.1505389 0.1506787 0.1507102 0.1505992 0.1505445 0.1505938 0.1508133 0.1509569 0.1512396 0.1514625 0.1516195 0.1516156 0.1515561 0.1514966 0.1513976 0.1512612 0.151076 0.1510596 0.1510431 0.151077 0.1511168 0.1511917 0.151023 0.1508045 0.1505885 0.1503493 0.1502373 0.1501726 0.1500762 0.1500065 0.1499782 0.150057 0.1502658 0.150469 0.1505335 0.1505505 0.1505328 0.1504275 0.1502438 0.1499674 0.1497118 0.1494661 0.1493102 0.1493681 0.1495501 0.1499738 0.1509654 0.155775 0.154484 0.1527379 0.1518718 0.1506028 0.1489256 0.147067 0.1447061 0.1436307 0.1443568 0.1451849 0.1455157 0.1452821 0.1445717 0.1439195 0.1435867 0.1436018 0.1438781 0.1442086 0.1448844 0.1454756 0.145663 0.146268 0.1467386 0.1472724 0.147664 0.1480913 0.1483739 0.1488841 0.1493636 0.1497088 0.1500379 0.1502916 0.1505389 0.1506787 0.1507102 0.1505992 0.1505445 0.1505938 0.1508133 0.1509569 0.1512396 0.1514625 0.1516195 0.1516156 0.1515561 0.1514966 0.1513976 0.1512612 0.151076 0.1510596 0.1510431 0.151077 0.1511168 0.1511917 0.151023 0.1508045 0.1505885 0.1503493 0.1502373 0.1501726 0.1500762 0.1500065 0.1499782 0.150057 0.1502658 0.150469 0.1505335 0.1505505 0.1505328 0.1504275 0.1502438 0.1499674 0.1497118 0.1494661 0.1493102 0.1493681 0.1495501 0.1499738 0.1509654 ]
</Nnet>
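A rough sketch (an assumption about the file's role, not the FunASR implementation): am.mvn holds a Kaldi-nnet-style <AddShift>/<Rescale> pair for the 560-dim LFR features (80 mel bins × lfr_m=7 from config.yaml), and CMVN is applied as (x + shift) * scale.

```python
import numpy as np

def load_cmvn(path: str):
    # Hypothetical parser: take the <LearnRateCoef> vector that follows each tag.
    shift, scale = None, None
    with open(path) as f:
        lines = f.readlines()
    for i, line in enumerate(lines):
        values = lines[i + 1].split()[3:-1] if i + 1 < len(lines) else []
        if line.startswith("<AddShift>"):
            shift = np.array(values, dtype=np.float32)
        elif line.startswith("<Rescale>"):
            scale = np.array(values, dtype=np.float32)
    return shift, scale

def apply_cmvn(feats: np.ndarray, shift: np.ndarray, scale: np.ndarray) -> np.ndarray:
    # feats: (num_frames, 560) LFR features
    return (feats + shift) * scale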
preprocessors/speech_seaco_paraformer_large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/asr_example_hotword.wav
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:51792bc95be33075c1a8abb9afb76ad9f72943e84cd723cc8825b2678799b004
size 253642
preprocessors/speech_seaco_paraformer_large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/config.yaml
ADDED
@@ -0,0 +1,160 @@
# This is an example that demonstrates how to configure a model file.
# You can modify the configuration according to your own requirements.

# to print the register_table:
# from funasr.utils.register import registry_tables
# registry_tables.print()

# network architecture
model: SeacoParaformer
model_conf:
  ctc_weight: 0.0
  lsm_weight: 0.1
  length_normalized_loss: true
  predictor_weight: 1.0
  predictor_bias: 1
  sampling_ratio: 0.75
  inner_dim: 512
  bias_encoder_type: lstm
  bias_encoder_bid: false
  seaco_lsm_weight: 0.1
  seaco_length_normal: true
  train_decoder: true
  NO_BIAS: 8377

# encoder
encoder: SANMEncoder
encoder_conf:
  output_size: 512
  attention_heads: 4
  linear_units: 2048
  num_blocks: 50
  dropout_rate: 0.1
  positional_dropout_rate: 0.1
  attention_dropout_rate: 0.1
  input_layer: pe
  pos_enc_class: SinusoidalPositionEncoder
  normalize_before: true
  kernel_size: 11
  sanm_shfit: 0
  selfattention_layer_type: sanm

# decoder
decoder: ParaformerSANMDecoder
decoder_conf:
  attention_heads: 4
  linear_units: 2048
  num_blocks: 16
  dropout_rate: 0.1
  positional_dropout_rate: 0.1
  self_attention_dropout_rate: 0.1
  src_attention_dropout_rate: 0.1
  att_layer_num: 16
  kernel_size: 11
  sanm_shfit: 0

# seaco decoder
seaco_decoder: ParaformerSANMDecoder
seaco_decoder_conf:
  attention_heads: 4
  linear_units: 1024
  num_blocks: 4
  dropout_rate: 0.1
  positional_dropout_rate: 0.1
  self_attention_dropout_rate: 0.1
  src_attention_dropout_rate: 0.1
  kernel_size: 21
  sanm_shfit: 0
  use_output_layer: false
  wo_input_layer: true

predictor: CifPredictorV3
predictor_conf:
  idim: 512
  threshold: 1.0
  l_order: 1
  r_order: 1
  tail_threshold: 0.45
  smooth_factor2: 0.25
  noise_threshold2: 0.01
  upsample_times: 3
  use_cif1_cnn: false
  upsample_type: cnn_blstm

# frontend related
frontend: WavFrontend
frontend_conf:
  fs: 16000
  window: hamming
  n_mels: 80
  frame_length: 25
  frame_shift: 10
  lfr_m: 7
  lfr_n: 6
  dither: 0.0

specaug: SpecAugLFR
specaug_conf:
  apply_time_warp: false
  time_warp_window: 5
  time_warp_mode: bicubic
  apply_freq_mask: true
  freq_mask_width_range:
  - 0
  - 30
  lfr_rate: 6
  num_freq_mask: 1
  apply_time_mask: true
  time_mask_width_range:
  - 0
  - 12
  num_time_mask: 1

train_conf:
  accum_grad: 1
  grad_clip: 5
  max_epoch: 150
  val_scheduler_criterion:
  - valid
  - acc
  best_model_criterion:
  - - valid
    - acc
    - max
  keep_nbest_models: 10
  log_interval: 50
  unused_parameters: true

optim: adam
optim_conf:
  lr: 0.0005
scheduler: warmuplr
scheduler_conf:
  warmup_steps: 30000

dataset: AudioDatasetHotword
dataset_conf:
  seaco_id: 8377
  index_ds: IndexDSJsonl
  batch_sampler: DynamicBatchLocalShuffleSampler
  batch_type: example # example or length
  batch_size: 1 # if batch_type is example, batch_size is the numbers of samples; if length, batch_size is source_token_len+target_token_len;
  max_token_length: 2048 # filter samples if source_token_len+target_token_len > max_token_length,
  buffer_size: 500
  shuffle: True
  num_workers: 0

tokenizer: CharTokenizer
tokenizer_conf:
  unk_symbol: <unk>
  split_with_space: true


ctc_conf:
  dropout_rate: 0.0
  ctc_type: builtin
  reduce: true
  ignore_nan_grad: true

normalize: null
preprocessors/speech_seaco_paraformer_large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/configuration.json
ADDED
@@ -0,0 +1,14 @@
{
  "framework": "pytorch",
  "task" : "auto-speech-recognition",
  "model": {"type" : "funasr"},
  "pipeline": {"type":"funasr-pipeline"},
  "model_name_in_hub": {
    "ms":"iic/speech_seaco_paraformer_large_asr_nat-zh-cn-16k-common-vocab8404-pytorch",
    "hf":""},
  "file_path_metas": {
    "init_param":"model.pt",
    "config":"config.yaml",
    "tokenizer_conf": {"token_list": "tokens.json", "seg_dict_file": "seg_dict"},
    "frontend_conf":{"cmvn_file": "am.mvn"}}
}
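A hedged usage sketch (assumes FunASR is installed and that AutoModel accepts a local model directory; not something this commit itself documents): configuration.json's file_path_metas maps the files above to their roles, so pointing FunASR at the directory should pick up model.pt, config.yaml, tokens.json, seg_dict and am.mvn.

```python
from funasr import AutoModel

# Load the model from this local directory rather than downloading from a hub.
model_dir = "preprocessors/speech_seaco_paraformer_large_asr_nat-zh-cn-16k-common-vocab8404-pytorch"
model = AutoModel(model=model_dir)

res = model.generate(input=f"{model_dir}/example/asr_example.wav", hotword="魔搭")
print(res)
```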
preprocessors/speech_seaco_paraformer_large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/example/asr_example.wav
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:2ffa478de2cd570dd54e8762008cd6bbde9871fd79757f1cdbbec7d6b7b49274
size 144770
preprocessors/speech_seaco_paraformer_large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/example/hotword.txt
ADDED
@@ -0,0 +1 @@
魔搭
preprocessors/speech_seaco_paraformer_large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/fig/res.png
ADDED
Git LFS Details
preprocessors/speech_seaco_paraformer_large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/fig/seaco.png
ADDED
Git LFS Details
preprocessors/speech_seaco_paraformer_large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/model.pt
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:3d491689244ec5dfbf9170ef3827c358aa10f1f20e42a7c59e15e688647946d1
size 989763045
preprocessors/speech_seaco_paraformer_large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/seg_dict
ADDED
The diff for this file is too large to render.
preprocessors/speech_seaco_paraformer_large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/tokens.json
ADDED
The diff for this file is too large to render.