thisisam committed on
Commit
907a197
·
1 Parent(s): c27b407
Files changed (2) hide show
  1. app.py +5 -29
  2. requirements.txt +3 -6
app.py CHANGED
@@ -1,33 +1,9 @@
1
  import gradio as gr
2
- from huggingface_hub import InferenceClient
3
- import os
4
 
5
- client = InferenceClient(token=os.getenv("HF_TOKEN"))
 
6
 
7
- def chat_with_fara(message, history):
8
- try:
9
- prompt = f"User: {message}\nAssistant:"
10
- response = client.text_generation(
11
- prompt=prompt,
12
- model="microsoft/Fara-7B",
13
- max_new_tokens=500
14
- )
15
- return response
16
- except Exception as e:
17
- return f"Error: {str(e)}"
18
 
19
- with gr.Blocks() as demo:
20
- gr.Markdown("# Fara-7B Chat")
21
- chatbot = gr.Chatbot()
22
- msg = gr.Textbox(label="Message")
23
- clear = gr.Button("Clear")
24
-
25
- def respond(message, chat_history):
26
- response = chat_with_fara(message, chat_history)
27
- chat_history.append((message, response))
28
- return "", chat_history
29
-
30
- msg.submit(respond, [msg, chatbot], [msg, chatbot])
31
- clear.click(lambda: ([], ""), outputs=[chatbot, msg])
32
-
33
- demo.launch(share=True)
 
1
  import gradio as gr
 
 
2
 
3
+ def chat_fn(message, history):
4
+ return f"Response to: {message}"
5
 
6
+ demo = gr.ChatInterface(chat_fn, title="Fara-7B Chat")
 
 
 
 
 
 
 
 
 
 
7
 
8
+ if __name__ == "__main__":
9
+ demo.launch(server_name="0.0.0.0", server_port=7860)
 
 
 
 
 
 
 
 
 
 
 
 
 
requirements.txt CHANGED
@@ -1,6 +1,3 @@
1
- gradio>=4.44.1
2
- huggingface-hub>=0.25.0,<2.0.0
3
- datasets>=2.19.0
4
- transformers>=4.35.0
5
- torch>=2.0.0
6
- accelerate>=0.24.0
 
1
+ gradio==4.43.0
2
+ huggingface-hub==0.25.0
3
+ datasets==2.19.0