Update app.py
app.py CHANGED

@@ -15,15 +15,15 @@ nlp_a = pipeline('question-answering', model='mrm8488/distill-bert-base-spanish-
 ))
 def generate_summary(text):
     inputs = tokenizer([text], padding="max_length", truncation=True, max_length=64, return_tensors="pt")
-    input_ids = inputs.input_ids
-    attention_mask = inputs.attention_mask
+    input_ids = inputs.input_ids
+    attention_mask = inputs.attention_mask
     output = model.generate(input_ids, attention_mask=attention_mask)
     return tokenizer.decode(output[0], skip_special_tokens=True)
 def generate_simple_text(data):
     outputs = []
     for text in data.split("."):
         inputs = tokenizer_s(text, max_length=1024, padding=True, truncation=True, return_tensors='pt')
-        output = model_s.generate(inputs['input_ids']
+        output = model_s.generate(inputs['input_ids'], max_length=100)
         outputs.append(['\n'.join([tokenizer_s.decode(g, skip_special_tokens=True, clean_up_tokenization_spaces=False) for g in output])])
     return outputs
 def generate_questions(data):
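For reference, a minimal self-contained sketch of the corrected generate_simple_text is shown below. The checkpoint behind model_s and tokenizer_s is loaded earlier in app.py and is not visible in this hunk, so the facebook/bart-large-cnn pair here is only an assumed stand-in for illustration; the relevant change is the closed generate() call with max_length=100.

# Sketch of the fixed generate_simple_text, assuming a generic seq2seq
# checkpoint. The real model_s/tokenizer_s are defined earlier in app.py and
# are not part of this hunk; "facebook/bart-large-cnn" is a stand-in only.
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

tokenizer_s = AutoTokenizer.from_pretrained("facebook/bart-large-cnn")  # assumed checkpoint
model_s = AutoModelForSeq2SeqLM.from_pretrained("facebook/bart-large-cnn")  # assumed checkpoint

def generate_simple_text(data):
    outputs = []
    for text in data.split("."):
        # Tokenize one sentence at a time, truncating to the model's input limit.
        inputs = tokenizer_s(text, max_length=1024, padding=True, truncation=True, return_tensors='pt')
        # The fix in this commit: the generate() call is properly closed and
        # the generated output is capped at 100 tokens.
        output = model_s.generate(inputs['input_ids'], max_length=100)
        outputs.append(['\n'.join([tokenizer_s.decode(g, skip_special_tokens=True,
                                                      clean_up_tokenization_spaces=False)
                                   for g in output])])
    return outputs

Capping the output at 100 tokens bounds the generation time for each sentence fragment produced by data.split(".").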