rbx-imarcin committed on
Commit
d4a76e1
·
verified ·
1 Parent(s): 3d4f8e0

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +78 -34
app.py CHANGED
@@ -1,45 +1,89 @@
1
  import os
2
- import openai
3
  import gradio as gr
 
4
 
5
- # Replace 'your_api_key_here' with your actual OpenAI API key
6
  api_token = os.getenv("oaikey")
 
7
 
8
- # Set your OpenAI API key
9
- openai.api_key = os.getenv(api_token)
10
-
11
- # Initialize chat history
12
- messages = [{"role": "system", "content": "You are a helpful assistant."}]
13
-
14
- def chatbot(user_input, other):
15
- print(other)
16
- # Append user input to the message history
17
- messages.append({"role": "user", "content": user_input})
 
 
 
 
 
 
 
 
 
 
 
 
18
 
19
- # Generate response using OpenAI's GPT-3.5-turbo
20
- response = openai.ChatCompletion.create(
21
- model="gpt-3.5-turbo",
22
- messages=messages,
23
- temperature=0.7,
24
- max_tokens=150,
25
- top_p=1,
26
- frequency_penalty=0,
27
- presence_penalty=0
28
- )
29
 
30
- # Extract the assistant's reply
31
- assistant_reply = response.choices[0].message["content"].strip()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
32
 
33
- # Append assistant's reply to the message history
34
- messages.append({"role": "assistant", "content": assistant_reply})
 
 
 
35
 
36
- return assistant_reply
37
-
38
- # Create Gradio interface
39
- with gr.Blocks() as demo:
40
- chatbot_ui = gr.ChatInterface(fn=chatbot, title="GPT-3.5 Turbo Chatbot")
 
41
 
42
- # Launch the Gradio app
43
  if __name__ == "__main__":
44
- demo.launch()
45
-
 
1
  import os
2
+ import json
3
  import gradio as gr
4
+ from openai import OpenAI
5
 
6
+ # Initialize the OpenAI client
7
  api_token = os.getenv("oaikey")
8
+ client = OpenAI(api_key=api_token)
9
 
10
def generate_response(prompt, system_prompt, temperature, top_p, seed):
    """Call the OpenAI chat completions API and return the model's reply.

    Args:
        prompt: User message text.
        system_prompt: System message steering the model's behavior.
        temperature: Sampling temperature passed straight to the API.
        top_p: Nucleus-sampling probability mass passed straight to the API.
        seed: Optional reproducibility seed; ``None`` leaves it unset.

    Returns:
        A ``(text_response, json_response)`` tuple: the assistant's text and
        the full API response pretty-printed as JSON.  On any failure an
        error string and ``"{}"`` are returned instead, so the UI never
        crashes on API errors.
    """
    try:
        kwargs = {
            "model": "gpt-4",
            "messages": [
                {"role": "system", "content": system_prompt},
                {"role": "user", "content": prompt},
            ],
            "temperature": temperature,
            "top_p": top_p,
        }
        # gr.Number can hand back a float (or None when left blank); the API
        # expects an integer seed, and omitting it is the proper "unset".
        if seed is not None:
            kwargs["seed"] = int(seed)

        response = client.chat.completions.create(**kwargs)

        # The assistant's reply lives on the first choice.
        text_response = response.choices[0].message.content

        # Pretty-print the entire response object for inspection in the UI.
        json_response = json.dumps(response.model_dump(), indent=2)

        return text_response, json_response

    except Exception as e:
        # Surface API/network failures to the user instead of raising.
        return f"Error: {str(e)}", "{}"
36
# Build the Gradio UI: inputs on top, outputs below, wired to generate_response.
with gr.Blocks() as app:
    gr.Markdown("# OpenAI API Interface")

    with gr.Row():
        with gr.Column():
            # User-facing inputs for the prompt and sampling controls.
            prompt = gr.Textbox(
                label="Prompt",
                placeholder="Enter your prompt here...",
                lines=5,
            )
            system_prompt = gr.Textbox(
                label="System Prompt",
                placeholder="You are a helpful assistant...",
                value="You are a helpful assistant.",
                lines=3,
            )

            with gr.Row():
                temperature = gr.Slider(
                    label="Temperature",
                    minimum=0.0,
                    maximum=2.0,
                    value=0.7,
                    step=0.1,
                )
                top_p = gr.Slider(
                    label="Top P",
                    minimum=0.0,
                    maximum=1.0,
                    value=1.0,
                    step=0.05,
                )
                seed = gr.Number(label="Seed (optional)", value=None, precision=0)

            submit_button = gr.Button("Generate Response")

    with gr.Row():
        with gr.Column():
            # Result panes: the reply text plus the raw API response as JSON.
            text_output = gr.Textbox(label="Generated Text", lines=8)
            json_output = gr.Code(language="json", label="Full API Response")

    # Clicking the button runs the API call with the current input values.
    submit_button.click(
        fn=generate_response,
        inputs=[prompt, system_prompt, temperature, top_p, seed],
        outputs=[text_output, json_output],
    )

# Start the web server only when executed as a script.
if __name__ == "__main__":
    app.launch()