Update infer_zipvoice.py
infer_zipvoice.py  +3 -3  CHANGED
@@ -245,18 +245,18 @@ model_defaults = {
 device = torch.device("cuda", 0)
 
 print("Loading model...")
-model_config = "
+model_config = "config.json"
 
 with open(model_config, "r") as f:
     model_config = json.load(f)
 
-token_file = "
+token_file = "tokens.txt"
 
 tokenizer = EspeakTokenizer(token_file=token_file, lang="vi")
 
 tokenizer_config = {"vocab_size": tokenizer.vocab_size, "pad_id": tokenizer.pad_id}
 
-model_ckpt = "
+model_ckpt = "iter-57000-avg-2.pt"
 
 model = ZipVoice(
     **model_config["model"],
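For context, a minimal sketch of the loading sequence this hunk belongs to, assembled from the visible lines of the diff. The import paths, the **tokenizer_config argument, and the checkpoint-loading calls at the end are assumptions about the surrounding script, not confirmed by the diff itself.

import json

import torch

# Assumed module paths; the Space's own imports may differ.
from zipvoice.models.zipvoice import ZipVoice
from zipvoice.tokenizer.tokenizer import EspeakTokenizer

device = torch.device("cuda", 0)

print("Loading model...")

# Local files referenced by the updated assignments in this commit.
model_config = "config.json"
with open(model_config, "r") as f:
    model_config = json.load(f)

token_file = "tokens.txt"
tokenizer = EspeakTokenizer(token_file=token_file, lang="vi")  # Vietnamese phonemization

tokenizer_config = {"vocab_size": tokenizer.vocab_size, "pad_id": tokenizer.pad_id}

model_ckpt = "iter-57000-avg-2.pt"
model = ZipVoice(
    **model_config["model"],
    **tokenizer_config,  # assumption: vocab_size/pad_id are passed alongside the model config
)

# Assumption: the checkpoint is loaded the standard PyTorch way; some ZipVoice
# checkpoints nest the weights under a "model" key, so handle both layouts.
state_dict = torch.load(model_ckpt, map_location="cpu")
model.load_state_dict(state_dict.get("model", state_dict))
model.to(device)
model.eval()

The net effect of the commit is simply to point these three assignments at local files in the Space (config.json, tokens.txt, iter-57000-avg-2.pt) instead of the previous values.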