Spaces: Runtime error
Upload app.py
app.py CHANGED
|
@@ -16,13 +16,13 @@ try:
 except:
     print("english model load error")
 
+'''
 try:
-    tokenizer_multilingual = AutoTokenizer.from_pretrained("amir22010/PyABSA_Hospital_Multilingual_allenai_tk-instruct-base-def-pos_FinedTuned_Model")
+    tokenizer_multilingual = AutoTokenizer.from_pretrained("amir22010/amir22010/PyABSA_Hospital_Multilingual_allenai_tk-instruct-base-def-pos_FinedTuned_Model")
     double_multilingual_generator = AutoModelForSeq2SeqLM.from_pretrained("amir22010/PyABSA_Hospital_Multilingual_allenai_tk-instruct-base-def-pos_FinedTuned_Model")
 except:
     print("multilingual model load error")
 
-'''
 try:
     tokenizer_keybert = AutoTokenizer.from_pretrained("amir22010/KeyBert_ABSA_Hospital_Multilingual_allenai_tk-instruct-base-def-pos_FinedTuned_Model")
     double_keybert_generator = AutoModelForSeq2SeqLM.from_pretrained("amir22010/KeyBert_ABSA_Hospital_Multilingual_allenai_tk-instruct-base-def-pos_FinedTuned_Model")
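Review note: the Space is showing a Runtime error, and the bare except/print pattern above hides why a checkpoint fails to load. Below is a minimal sketch of a more observable loader; the helper name, logger setup, and the English repo id (inferred by analogy with the multilingual naming scheme) are illustrative assumptions, not part of app.py:

import logging

from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

def load_seq2seq(repo_id):
    """Load a tokenizer/model pair; return (None, None) instead of crashing the Space."""
    try:
        tokenizer = AutoTokenizer.from_pretrained(repo_id)
        model = AutoModelForSeq2SeqLM.from_pretrained(repo_id)
        return tokenizer, model
    except Exception:
        # Unlike a bare `except: print(...)`, this records the full traceback in the
        # Space logs, so a bad repo id (e.g. a doubled "amir22010/" prefix) is visible.
        logger.exception("failed to load %s", repo_id)
        return None, None

# The English checkpoint is the only one this commit leaves enabled; the repo id
# below is an assumption based on the multilingual naming pattern shown in the diff.
tokenizer_english, double_english_generator = load_seq2seq(
    "amir22010/PyABSA_Hospital_English_allenai_tk-instruct-base-def-pos_FinedTuned_Model"
)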
@@ -35,9 +35,9 @@ def perform_asde_inference(text, dataset, model_id):
     if not text:
         if model_id == "PyABSA_Hospital_English_allenai/tk-instruct-base-def-pos_FinedTuned_Model":
             df = pd.read_csv('pyabsa_english.csv')#validation dataset
+            '''
         elif model_id == "PyABSA_Hospital_Multilingual_allenai/tk-instruct-base-def-pos_FinedTuned_Model":
             df = pd.read_csv('pyabsa_multilingual.csv')#validation dataset
-            '''
         elif model_id == "KeyBert_ABSA_Hospital_allenai/tk-instruct-base-def-pos_FinedTuned_Model":
             df = pd.read_csv('keybert_valid.csv')#validation dataset
             '''
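The ''' markers above disable elif branches by swallowing them into a string literal, which works but breaks easily each time the block is moved. A dictionary lookup expresses the same model-id-to-CSV mapping more robustly; a sketch where VALIDATION_CSVS and load_validation_frame are illustrative names and the CSV paths are the ones from the diff:

import pandas as pd

# Validation CSV per model id, as used when perform_asde_inference gets empty text.
VALIDATION_CSVS = {
    "PyABSA_Hospital_English_allenai/tk-instruct-base-def-pos_FinedTuned_Model": "pyabsa_english.csv",
    # Disabled in this commit together with their models:
    # "PyABSA_Hospital_Multilingual_allenai/tk-instruct-base-def-pos_FinedTuned_Model": "pyabsa_multilingual.csv",
    # "KeyBert_ABSA_Hospital_allenai/tk-instruct-base-def-pos_FinedTuned_Model": "keybert_valid.csv",
}

def load_validation_frame(model_id):
    """Return the validation dataframe for model_id, or None if that model is disabled."""
    csv_path = VALIDATION_CSVS.get(model_id)
    return pd.read_csv(csv_path) if csv_path is not None else None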
@@ -67,11 +67,12 @@ def perform_asde_inference(text, dataset, model_id):
         output = double_english_generator.generate(tokenized_text.input_ids,max_length=512)
         model_generated = tokenizer_english.decode(output[0], skip_special_tokens=True)
 
+    '''
     elif model_id == "PyABSA_Hospital_Multilingual_allenai/tk-instruct-base-def-pos_FinedTuned_Model":
         tokenized_text = tokenizer_multilingual(bos_instruction + text + delim_instruct + eos_instruct, return_tensors="pt")
         output = double_multilingual_generator.generate(tokenized_text.input_ids,max_length=512)
         model_generated = tokenizer_multilingual.decode(output[0], skip_special_tokens=True)
-
+
     elif model_id == "KeyBert_ABSA_Hospital_allenai/tk-instruct-base-def-pos_FinedTuned_Model":
         tokenized_text = tokenizer_keybert(bos_instruction + text + delim_instruct + eos_instruct, return_tensors="pt")
         output = double_keybert_generator.generate(tokenized_text.input_ids,max_length=512)
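With the multilingual branch commented out, the generation flow that stays active reduces to prompt assembly, generate, and decode. For reference, a sketch of that English path; run_english_inference is an illustrative wrapper, and bos_instruction, delim_instruct and eos_instruct are the prompt pieces defined earlier in app.py and not shown in this diff:

def run_english_inference(text):
    """Instruction-tuned seq2seq inference: build the prompt, generate, decode."""
    # Prompt layout mirrors the tokenizer calls in the diff: instruction prefix,
    # input text, delimiter, end-of-instruction marker.
    prompt = bos_instruction + text + delim_instruct + eos_instruct
    tokenized_text = tokenizer_english(prompt, return_tensors="pt")
    output = double_english_generator.generate(tokenized_text.input_ids, max_length=512)
    return tokenizer_english.decode(output[0], skip_special_tokens=True)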
@@ -123,7 +124,7 @@ if __name__ == "__main__":
     asde_model_ids = gr.Radio(
         choices=[
             "PyABSA_Hospital_English_allenai/tk-instruct-base-def-pos_FinedTuned_Model",
-            "PyABSA_Hospital_Multilingual_allenai/tk-instruct-base-def-pos_FinedTuned_Model",
+            #"PyABSA_Hospital_Multilingual_allenai/tk-instruct-base-def-pos_FinedTuned_Model",
             #"KeyBert_ABSA_Hospital_allenai/tk-instruct-base-def-pos_FinedTuned_Model"
         ],
         value="PyABSA_Hospital_English_allenai/tk-instruct-base-def-pos_FinedTuned_Model",
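After this change the radio effectively offers a single model. A minimal, self-contained sketch of how such a gr.Radio feeds a handler; echo_choice stands in for perform_asde_inference, whose dataset argument and output format are not shown in this diff:

import gradio as gr

def echo_choice(text, model_id):
    # Placeholder handler; the real app dispatches to perform_asde_inference here.
    return f"{model_id}: {text}"

demo = gr.Interface(
    fn=echo_choice,
    inputs=[
        gr.Textbox(label="Review text"),
        gr.Radio(
            choices=["PyABSA_Hospital_English_allenai/tk-instruct-base-def-pos_FinedTuned_Model"],
            value="PyABSA_Hospital_English_allenai/tk-instruct-base-def-pos_FinedTuned_Model",
            label="Model",
        ),
    ],
    outputs=gr.Textbox(label="Model output"),
)

if __name__ == "__main__":
    demo.launch()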