Update app.py
app.py CHANGED
@@ -45,7 +45,10 @@ class model_onxx:
 
 
 
-
+    def download_file(self,file_path):
+        ff= gr.File(value=file_path, visible=True)
+        file_url = ff.value['url']
+        return file_url
     def function_change(self,n_model,token,n_onxx,choice):
         if choice=="decoder":
 
@@ -67,15 +70,13 @@ class model_onxx:
     def convert_to_onnx_only_decoder(self,n_model,token,namemodelonxx):
         model=VitsModel.from_pretrained(n_model,token=token)
         x=f"{namemodelonxx}.onnx"
-
-        os.makedirs(storage_dir)
-        file_path = os.path.join("uploads",x)
+
         vocab_size = model.text_encoder.embed_tokens.weight.size(0)
         example_input = torch.randint(0, vocab_size, (1, 100), dtype=torch.long)
         torch.onnx.export(
             model,                    # The model to be exported
             example_input,            # Example input for the model
-
+            x,                        # The filename for the exported ONNX model
             opset_version=11,         # Use an appropriate ONNX opset version
             input_names=['input'],    # Name of the input layer
             output_names=['output'],  # Name of the output layer
@@ -84,7 +85,7 @@ class model_onxx:
                 'output': {0: 'batch_size'}
             }
         )
-        return
+        return self.download_file(x)
     def convert_to_onnx_all(self,n_model,token ,namemodelonxx):
 
         model=VitsModel.from_pretrained(n_model,token=token)
@@ -104,7 +105,7 @@ class model_onxx:
                 'output': {0: 'batch_size'}
             }
         )
-
+        return self.download_file(x)
     def starrt(self):
         #with gr.Blocks() as demo:
         with gr.Row():
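A note on the new download_file helper: it constructs a gr.File component inside the handler and reads ff.value['url']. Whether .value exposes a 'url' key depends on the Gradio version, so that lookup may fail at runtime. A minimal sketch of the more common pattern, where the handler simply returns a filesystem path and a gr.File output component serves it as a download (the component labels here are illustrative, not from the Space):

import gradio as gr

def download_file(file_path):
    # With a gr.File output component, returning the path is enough;
    # Gradio turns it into a download link itself.
    return file_path

demo = gr.Interface(fn=download_file,
                    inputs=gr.Textbox(label="Path to .onnx file"),
                    outputs=gr.File(label="Converted model"))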
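The second hunk drops the uploads/ path logic (storage_dir is not defined anywhere in this diff, and os.makedirs without exist_ok=True raises FileExistsError when the directory already exists) and instead passes x straight to torch.onnx.export as the destination file, an argument the old call was missing. A self-contained sketch of the same export pattern, using a toy embedding model so it runs without a Hugging Face checkpoint or token (TinyTTS and its sizes are stand-ins, not the Space's VitsModel):

import torch

class TinyTTS(torch.nn.Module):
    def __init__(self, vocab_size=100, dim=16):
        super().__init__()
        self.embed = torch.nn.Embedding(vocab_size, dim)
        self.proj = torch.nn.Linear(dim, 1)

    def forward(self, input_ids):
        return self.proj(self.embed(input_ids))

model = TinyTTS()
example_input = torch.randint(0, 100, (1, 100), dtype=torch.long)

torch.onnx.export(
    model,              # the model to trace and export
    example_input,      # example input used for tracing
    "tiny_tts.onnx",    # destination filename (the role x plays above)
    opset_version=11,
    input_names=['input'],
    output_names=['output'],
    dynamic_axes={'input': {0: 'batch_size'},
                  'output': {0: 'batch_size'}},
)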
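With both converters now ending in return self.download_file(x) instead of a bare return, the callback has a file to hand back to the UI. A hedged sketch of how that return value could be surfaced from the gr.Row() layout in starrt() (the button and component names are assumptions; the full layout is not shown in this diff):

import gradio as gr

def convert(name):
    path = f"{name}.onnx"
    open(path, "wb").close()   # stand-in for the real torch.onnx.export
    return path

with gr.Blocks() as demo:
    with gr.Row():
        model_name = gr.Textbox(label="Model name")
        onnx_file = gr.File(label="Download .onnx")
    gr.Button("Convert").click(convert, inputs=model_name, outputs=onnx_file)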