Update app.py
app.py CHANGED

@@ -9,9 +9,7 @@ from VitsModelSplit.vits_model2 import VitsModel,get_state_grad_loss
 import VitsModelSplit.monotonic_align as monotonic_align
 
 token=os.environ.get("key_")
-
-#device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
-model_vits=VitsModel.from_pretrained("wasmdashai/vits-ar-sa-huba",token=token)#.to(device)
+
 
 # import VitsModelSplit.monotonic_align as monotonic_align
 from IPython.display import clear_output
@@ -188,7 +186,7 @@ def train_step(batch,models=[],optimizers=[], training_args=None,tools=[]):
     feature_extractor,maf,dict_state_grad_loss=tools
 
     with autocast(enabled=training_args.fp16):
-        speaker_embeddings=get_embed_speaker(
+        speaker_embeddings=get_embed_speaker(self,batch["speaker_id"])
         waveform,ids_slice,log_duration,prior_latents,posterior_log_variances,prior_means,prior_log_variances,labels_padding_mask = self.forward_train(
             input_ids=batch["input_ids"],
             attention_mask=batch["attention_mask"],
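The first hunk removes the module-level VitsModel.from_pretrained("wasmdashai/vits-ar-sa-huba", token=token) call (along with the already-commented device line), so the checkpoint is no longer loaded at import time. The commit does not show how the model is obtained instead; if the intent is to defer the download until it is actually needed, one common pattern is a cached lazy loader. A minimal sketch under that assumption; load_vits is a hypothetical helper, not a function from app.py:

```python
import os
from functools import lru_cache

from VitsModelSplit.vits_model2 import VitsModel

token = os.environ.get("key_")


@lru_cache(maxsize=None)
def load_vits(checkpoint: str = "wasmdashai/vits-ar-sa-huba"):
    # Load the checkpoint on first use and cache it, instead of paying the
    # download/initialisation cost when app.py is imported.
    return VitsModel.from_pretrained(checkpoint, token=token)
```

A request handler can then call load_vits() on demand rather than holding a module-level model_vits object.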
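The second hunk completes the previously truncated get_embed_speaker( call, so speaker embeddings are now looked up from batch["speaker_id"] inside the autocast block before self.forward_train(...). The body of get_embed_speaker is not shown in this diff; a minimal sketch of such a lookup, assuming the model exposes an nn.Embedding table named embed_speaker (as the upstream transformers VitsModel does for multi-speaker configs); the real helper in VitsModelSplit may resolve embeddings differently:

```python
import torch


def get_embed_speaker(model, speaker_id):
    # Sketch only: assumes `model.embed_speaker` is an nn.Embedding over
    # speaker ids, as in the upstream transformers VitsModel. The actual
    # implementation used by this Space may differ.
    speaker_id = torch.as_tensor(speaker_id, dtype=torch.long)
    if speaker_id.dim() == 0:
        speaker_id = speaker_id.unsqueeze(0)  # ensure a batch dimension
    return model.embed_speaker(speaker_id)    # (batch, speaker_embedding_size)
```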