import streamlit as st
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM
st.title('Informal to Formal:')

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

st.text('''Check out this other space: https://huggingface.co/spaces/BigSalmon/GPT2Space''')

st.text('''How To Make Prompt: https://huggingface.co/BigSalmon/DefinitionsSynonyms3
part of speech- verb
definition: grow less in intensity or degree
ex. rather than leave immediately and be drenched, they waited for the storm to ________
synonyms: subside; moderate; decrease
antonyms: increase
word: abate''')
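# A minimal sketch of assembling a prompt in the format shown above, assuming the
# BigSalmon/DefinitionsSynonyms3 checkpoint is prompted with these labelled fields
# and asked to complete the final "word:" line. build_prompt is a hypothetical
# helper and is not called anywhere in the app.
def build_prompt(part_of_speech, definition, example, synonyms, antonyms):
    # Join the labelled fields into one newline-separated prompt string.
    return "\n".join([
        f"part of speech- {part_of_speech}",
        f"definition: {definition}",
        f"ex. {example}",
        f"synonyms: {'; '.join(synonyms)}",
        f"antonyms: {'; '.join(antonyms)}",
        "word:",
    ])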
# Earlier revisions of this space loaded other checkpoints, e.g. BigSalmon/MrLincoln12,
# BigSalmon/Points, BigSalmon/InformalToFormalLincoln91Paraphrase and
# BigSalmon/InformalToFormalLincoln95Paraphrase.
@st.cache_resource  # cache the loaded model across reruns (recent Streamlit versions)
def get_model():
    tokenizer = AutoTokenizer.from_pretrained("BigSalmon/DefinitionsSynonyms3")
    model = AutoModelForCausalLM.from_pretrained("BigSalmon/DefinitionsSynonyms3")
    # Keep the model on the same device as the inputs and switch to inference mode.
    model.to(device)
    model.eval()
    return model, tokenizer

model, tokenizer = get_model()
with st.form(key='my_form'):
    prompt = st.text_area(label='Enter sentence')
    submit_button = st.form_submit_button(label='Submit')
    if submit_button:
        with torch.no_grad():
            # Tokenize the prompt and run a single forward pass.
            text = tokenizer.encode(prompt)
            myinput, past_key_values = torch.tensor([text]), None
            myinput = myinput.to(device)
            logits, past_key_values = model(myinput, past_key_values=past_key_values, return_dict=False)
            # Keep only the last position: the next-token distribution.
            logits = logits[0, -1]
            probabilities = torch.nn.functional.softmax(logits, dim=-1)
            # Collect the 100 most likely next tokens.
            best_logits, best_indices = logits.topk(100)
            best_words = [tokenizer.decode([idx.item()]) for idx in best_indices]
            # Greedily append the top token to the running token list (unused in the
            # single-step display below; see the sketch at the end of the file).
            text.append(best_indices[0].item())
            best_probabilities = probabilities[best_indices].tolist()
            st.write(best_words)
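# A minimal sketch of how the single-step prediction above could be extended into
# multi-token greedy decoding by reusing past_key_values. greedy_continue is a
# hypothetical helper (not part of the original app) and assumes a GPT-2-style
# checkpoint whose call returns (logits, past_key_values) with return_dict=False,
# as the form handler above already relies on.
def greedy_continue(prompt, max_new_tokens=20):
    token_ids = tokenizer.encode(prompt)
    input_ids = torch.tensor([token_ids]).to(device)
    past_key_values = None
    with torch.no_grad():
        for _ in range(max_new_tokens):
            # After the first step, feed only the newest token; the cached
            # past_key_values carry the rest of the context.
            logits, past_key_values = model(input_ids,
                                            past_key_values=past_key_values,
                                            return_dict=False)
            next_id = logits[0, -1].argmax().item()
            token_ids.append(next_id)
            if next_id == tokenizer.eos_token_id:
                break
            input_ids = torch.tensor([[next_id]]).to(device)
    return tokenizer.decode(token_ids)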