ViktorMardskog committed
Commit 6aef632 · 1 Parent(s): 62c85fc

app and req file

Files changed (2):
  1. app.py +233 -0
  2. requirements.txt +3 -0
app.py ADDED
@@ -0,0 +1,233 @@
+import gradio as gr
+from llama_cpp import Llama
+
+from huggingface_hub import hf_hub_download
+import os
+# https://llama-cpp-python.readthedocs.io/en/latest/api-reference/
+
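+# Download the quantized GGUF checkpoint from the Hugging Face Hub and load it
+# with llama.cpp on CPU (n_gpu_layers=0) using a 500-token context window.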
+model_path = hf_hub_download(
+    repo_id="ViktorMardskog/lora_model_3e-4LR_10k_1b_val",
+    filename="llama-3.2-1b-instruct.Q4_K_M.gguf",
+    repo_type="model",
+)
+
+llm = Llama(model_path=model_path, n_ctx=500, n_gpu_layers=0, verbose=False)
+
+# To use the same model for brainstorming, uncomment this block and comment out the one below.
+""" model_path_B = hf_hub_download(
+    repo_id="ViktorMardskog/lora_model_3e-4LR_10k_1b_val",
+    filename="llama-3.2-1b-instruct.Q4_K_M.gguf",
+    repo_type="model",
+) """
+
+model_path_B = hf_hub_download(
+    repo_id="ViktorMardskog/lora_model_3e-4LR_1b_val_BrainStoorming",
+    filename="llama-3.2-1b-instruct.Q4_K_M.gguf",
+    repo_type="model",
+)
+
+llm_brainstoorming = Llama(model_path=model_path_B, n_ctx=500, n_gpu_layers=0, verbose=False)
+
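+# Each helper below builds a single-turn chat prompt and streams the reply from
+# llama.cpp token by token, yielding the accumulated text so the Gradio output
+# box updates live.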
+def evaluate(expanded_idea):
+    messages = [{"role": "system", "content": "You are an assistant."}]
+
+    message = f"Idea to be evaluated: {expanded_idea}.\n Evaluate the idea with a score (1–10) and give 3 concrete concerns.\n Format:\n Score: X/10\n Concerns:\n 1. ...\n 2. ...\n 3. ..."
+
+    messages.append({"role": "user", "content": message})
+
+    # The streaming loop below is taken directly from the Hugging Face Gradio chatbot UI template.
+    response = ""
+
+    for message_part in llm.create_chat_completion(
+        messages,
+        max_tokens=128,
+        stream=True,
+        temperature=0.8,
+        top_p=0.9,
+    ):
+        choices = message_part["choices"]
+        token = ""
+        if choices and choices[0]["delta"].get("content"):
+            token = choices[0]["delta"]["content"]
+        response += token
+        yield response
+
+def refine(expanded_idea):
+    messages = [{"role": "system", "content": "You are an assistant."}]
+
+    message = f"Refine the following idea: {expanded_idea}."
+
+    messages.append({"role": "user", "content": message})
+
+    # The streaming loop below is taken directly from the Hugging Face Gradio chatbot UI template.
+    response = ""
+
+    for message_part in llm.create_chat_completion(
+        messages,
+        max_tokens=128,
+        stream=True,
+        temperature=0.8,
+        top_p=0.9,
+    ):
+        choices = message_part["choices"]
+        token = ""
+        if choices and choices[0]["delta"].get("content"):
+            token = choices[0]["delta"]["content"]
+        response += token
+        yield response
+
+def brainstorm(expanded_idea):
+    messages = [{"role": "system", "content": "You are an assistant."}]
+
+    message = f"Idea Input: {expanded_idea}. Brainstorm three features this idea could include. Reply ONLY with bullet-points.\n"
+
+    messages.append({"role": "user", "content": message})
+
+    # The streaming loop below is taken directly from the Hugging Face Gradio chatbot UI template.
+    response = ""
+
+    for message_part in llm_brainstoorming.create_chat_completion(
+        messages=messages,
+        max_tokens=128,
+        stream=True,
+        temperature=0.8,
+        top_p=0.9,
+    ):
+        choices = message_part["choices"]
+        token = ""
+        if choices and choices[0]["delta"].get("content"):
+            token = choices[0]["delta"]["content"]
+        response += token
+        yield response
+
+
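+# Sticky-note helpers. Notes live in a gr.State list of {"title": ..., "text": ...}
+# dicts, and the Radio component shows the titles as selectable choices.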
+def add_note(notes, title):
+    if notes is None:
+        notes = []
+
+    title = title.strip()
+
+    notes = notes + [{"title": title, "text": ""}]
+    choices = [n["title"] for n in notes]
+
+    return gr.update(choices=choices, value=title), notes, ""
+
+def select_note(notes, selected_title):
+    if notes is None:
+        notes = []
+    if not selected_title:
+        return "", notes
+
+    for n in notes:
+        if n["title"] == selected_title:
+            return n["text"], notes
+
+    return "", notes
+
+def update_note_text(notes_state, selected_title, new_expanded_idea):
+    if not selected_title:
+        return notes_state
+    if notes_state is None:
+        notes_state = []
+
+    new_notes = []
+    for note in notes_state:
+        if note["title"] == selected_title:
+            new_notes.append({"title": note["title"], "text": new_expanded_idea})
+        else:
+            new_notes.append(note)
+    return new_notes
+
+def delete_note(notes, selected_title):
+    if notes is None:
+        notes = []
+    if not selected_title:
+        # Nothing is selected, so keep the list unchanged.
+        choices = [n["title"] for n in notes]
+        return gr.update(choices=choices, value=None), notes
+
+    new_notes = []
+    for note in notes:
+        if note["title"] != selected_title:
+            new_notes.append(note)
+
+    choices = []
+    for new_note in new_notes:
+        choices.append(new_note["title"])
+
+    return gr.update(choices=choices, value=None), new_notes
+
+
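+# Helpers for moving LLM output back into the "Expanded idea" textbox.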
+def append_LLM_text(expanded_idea, llm_output):
+    if not llm_output:
+        return expanded_idea
+    if not expanded_idea:
+        return llm_output
+
+    return expanded_idea + "\n" + llm_output
+
+def replace_idea_text(expanded_idea, llm_output):
+    new = llm_output or expanded_idea
+    return new
+
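+# UI layout: a sticky-note sidebar on the left and an idea editor with the
+# LLM action buttons on the right.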
+with gr.Blocks(title="Brainstorming helper") as demo:
+    notes_state = gr.State([])  # list of {"title": ..., "text": ...} dicts
+
+    with gr.Row():
+
+        with gr.Column(scale=2):
+            gr.Markdown("Sticky notes")
+            notes_list = gr.Radio(choices=[], label="Ideas", interactive=True)
+            new_title = gr.Textbox(label="New idea title", placeholder="Short title for the idea")
+            add_note_btn = gr.Button("Add sticky note")
+            delete_btn = gr.Button("Delete selected note")
+
+        with gr.Column(scale=3):
+            expanded_idea = gr.Textbox(label="Expanded idea", lines=12, placeholder="Describe the idea... (a sticky note has to be selected)")
+            with gr.Row():
+                save_btn = gr.Button("Save idea")
+            with gr.Row():
+                refine_btn = gr.Button("Refine idea")
+                brainstorm_btn = gr.Button("Brainstorm around idea")
+                evaluate_btn = gr.Button("Evaluate idea")
+
+            llm_output_box = gr.Textbox(label="LLM output", lines=10, interactive=False)
+            with gr.Row():
+                append_btn = gr.Button("Append to expanded idea")
+                replace_btn = gr.Button("Replace expanded idea")
+
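+    # Wire the buttons to their handlers. The three LLM buttons stream their
+    # output into llm_output_box; the note buttons keep notes_state, the Radio
+    # list, and the editor in sync.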
203
+
204
+ refine_btn.click(refine, inputs=expanded_idea, outputs=llm_output_box)
205
+ brainstorm_btn.click(brainstorm, inputs=expanded_idea, outputs=llm_output_box)
206
+ evaluate_btn.click(evaluate, inputs=expanded_idea, outputs=llm_output_box)
207
+
208
+
209
+ add_note_btn.click(add_note, inputs=[notes_state, new_title],outputs=[notes_list, notes_state, expanded_idea])
210
+ notes_list.change(select_note, inputs=[notes_state, notes_list],outputs=[expanded_idea, notes_state])
211
+ delete_btn.click(delete_note, inputs=[notes_state, notes_list], outputs=[notes_list, notes_state])
212
+
213
+ append_btn.click(append_LLM_text,inputs=[expanded_idea, llm_output_box], outputs=[expanded_idea])
214
+
215
+ replace_btn.click(replace_idea_text, inputs=[expanded_idea, llm_output_box], outputs=[expanded_idea])
216
+
217
+
218
+ if __name__ == "__main__":
219
+ demo.queue()
220
+ demo.launch()
221
+
requirements.txt ADDED
@@ -0,0 +1,3 @@
+gradio>=4.0.0
+huggingface-hub
+llama-cpp-python==0.2.90