Upload model

Files changed:
- config.json (+3 -3)
- model.safetensors (+1 -1)
config.json CHANGED

@@ -1,8 +1,8 @@
 {
-  "_name_or_path": "/content/drive/MyDrive/
+  "_name_or_path": "/content/drive/MyDrive/FINE/Huba/v1",
   "activation_dropout": 0.1,
   "architectures": [
-    "
+    "VitsModelForPreTraining"
   ],
   "attention_dropout": 0.1,
   "depth_separable_channels": 2,
@@ -81,7 +81,7 @@
   "sampling_rate": 16000,
   "segment_size": 8192,
   "speaker_embedding_size": 0,
-  "speaking_rate": 0
+  "speaking_rate": 1.0,
   "spectrogram_bins": 513,
   "torch_dtype": "float32",
   "transformers_version": "4.44.2",
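This config is what the Transformers VITS code reads at inference time: sampling_rate (16000 Hz) is the rate of the generated waveform, and speaking_rate scales the predicted phoneme durations (its reciprocal is used as the length scale, so the previous value of 0 was not usable). The VitsModelForPreTraining architecture name appears to come from the fine-tuning setup rather than core Transformers; for plain inference such checkpoints are typically loaded with VitsModel. A minimal sketch under that assumption, with a placeholder repo id (the actual repository name is not shown in this commit) and assuming the repo also contains the tokenizer files:

import torch
from transformers import AutoTokenizer, VitsModel

repo_id = "<user>/<repo>"  # placeholder; replace with the uploaded repository id
tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = VitsModel.from_pretrained(repo_id)

# speaking_rate from config.json is copied onto the model and can be adjusted at runtime
model.speaking_rate = 1.0

inputs = tokenizer("Example sentence to synthesize.", return_tensors="pt")
with torch.no_grad():
    waveform = model(**inputs).waveform  # float32 audio at config.sampling_rate (16000 Hz)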
model.safetensors CHANGED

@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:75b269fde54d66ffa0c152fdef8da5d0d8557e5538d466cd22966c4214677a25
 size 332161480
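The weights themselves are stored through Git LFS; the repository only tracks this pointer file, which records the SHA-256 of the real file (the oid) and its size in bytes (332161480). A downloaded copy can be checked against the pointer with a short integrity sketch, using a placeholder local path:

import hashlib
import os

EXPECTED_OID = "75b269fde54d66ffa0c152fdef8da5d0d8557e5538d466cd22966c4214677a25"
EXPECTED_SIZE = 332161480
path = "model.safetensors"  # placeholder for wherever the file was downloaded

# Hash the file in 1 MiB chunks to avoid loading all 332 MB into memory
h = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        h.update(chunk)

assert os.path.getsize(path) == EXPECTED_SIZE, "size does not match the LFS pointer"
assert h.hexdigest() == EXPECTED_OID, "SHA-256 does not match the LFS pointer"
print("model.safetensors matches the LFS pointer")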