Spaces: Runtime error
Update app.py
app.py CHANGED
@@ -72,17 +72,40 @@
# st.download_button(label=':blue[Download]',data=file,file_name=OP,mime="image/png")
# st.success("Thanks for using the app !!!")

-import torch
-import streamlit as st
-from transformers import AutoModelForCausalLM, AutoTokenizer
-
-#torch.set_default_device("cuda")
-
-model = AutoModelForCausalLM.from_pretrained("soulhq-ai/phi-2-insurance_qa-sft-lora", torch_dtype="auto", trust_remote_code=True)
-tokenizer = AutoTokenizer.from_pretrained("soulhq-ai/phi-2-insurance_qa-sft-lora", trust_remote_code=True)
-i=st.text_input('Prompt', 'Life of Brian')
-#inputs = tokenizer('''### Instruction: What Does Basic Homeowners Insurance Cover?\n### Response: ''', return_tensors="pt", return_attention_mask=False)
-inputs = tokenizer(i, return_tensors="pt", return_attention_mask=False)
-outputs = model.generate(**inputs, max_length=1024)
-text = tokenizer.batch_decode(outputs)[0]
-print(text)
+# import torch
+# import streamlit as st
+# from transformers import AutoModelForCausalLM, AutoTokenizer
+
+# #torch.set_default_device("cuda")
+
+# model = AutoModelForCausalLM.from_pretrained("soulhq-ai/phi-2-insurance_qa-sft-lora", torch_dtype="auto", trust_remote_code=True)
+# tokenizer = AutoTokenizer.from_pretrained("soulhq-ai/phi-2-insurance_qa-sft-lora", trust_remote_code=True)
+# i=st.text_input('Prompt', 'Life of Brian')
+# #inputs = tokenizer('''### Instruction: What Does Basic Homeowners Insurance Cover?\n### Response: ''', return_tensors="pt", return_attention_mask=False)
+# inputs = tokenizer(i, return_tensors="pt", return_attention_mask=False)
+# outputs = model.generate(**inputs, max_length=1024)
+# text = tokenizer.batch_decode(outputs)[0]
+# print(text)
+
+import torch
+import streamlit as st
+from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
+
+model_name = "facebook/blenderbot-400M-distill"
+
+# BlenderBot is an encoder-decoder model, so it loads via AutoModelForSeq2SeqLM
+model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
+tokenizer = AutoTokenizer.from_pretrained(model_name)
+ch = []  # chat history, alternating user and bot turns
+
+def chat():
+    h_s = "\n".join(ch)  # flatten the history into one string
+    i = st.text_input("enter")
+    # history and the new message go to the tokenizer as a single sequence
+    inputs = tokenizer(h_s + "\n" + i, return_tensors="pt", truncation=True)
+    outputs = model.generate(**inputs, max_length=60)
+    response = tokenizer.decode(outputs[0], skip_special_tokens=True).strip()
+    ch.append(i)
+    ch.append(response)
+    st.write(response)  # display the reply in the app
+    return response
+
+if __name__ == "__main__":
+    chat()
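
One caveat with the committed version: Streamlit reruns the whole script on every interaction, so a module-level list like ch is rebuilt empty each time and the chat history never actually accumulates between turns. Below is a minimal sketch of the same chat loop that persists history in st.session_state and caches the model load; the "history" key, the load_model helper, and the if-guard on the input are illustrative choices, not part of this commit.

import streamlit as st
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

model_name = "facebook/blenderbot-400M-distill"

@st.cache_resource  # load weights once per process instead of on every rerun
def load_model():
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    return model, tokenizer

model, tokenizer = load_model()

# st.session_state survives reruns; a plain list would be reset each time
if "history" not in st.session_state:
    st.session_state.history = []

i = st.text_input("enter")
if i:  # only generate once the user has typed something
    h_s = "\n".join(st.session_state.history)
    inputs = tokenizer(h_s + "\n" + i, return_tensors="pt", truncation=True)
    outputs = model.generate(**inputs, max_length=60)
    response = tokenizer.decode(outputs[0], skip_special_tokens=True).strip()
    st.session_state.history.extend([i, response])
    st.write(response)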