MeowSky49887 committed on
Commit
c2e19e0
·
verified ·
1 Parent(s): cfa48e9

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +0 -99
app.py CHANGED
@@ -211,85 +211,6 @@ async def train_model_async(progress=gr.Progress(track_tqdm=True)):
211
  except Exception as e:
212
  raise gr.Error(e)
213
 
214
- DEFAULT_SYSTEM_PROMPT = "あなたは誠実で優秀な日本人のアシスタントです。特に指示が無い場合は、常に日本語で回答してください。"
215
- memory = [{"role": "system", "content": DEFAULT_SYSTEM_PROMPT}]
216
-
217
- def reset_memory():
218
- global memory
219
- memory = [{"role": "system", "content": DEFAULT_SYSTEM_PROMPT}]
220
- return None
221
-
222
- def init():
223
- try:
224
- global llamaTokenizer, llamaModel
225
- llamaTokenizer = AutoTokenizer.from_pretrained("elyza/Llama-3-ELYZA-JP-8B", trust_remote_code=True)
226
- llamaModel = AutoModelForCausalLM.from_pretrained(
227
- "elyza/Llama-3-ELYZA-JP-8B",
228
- torch_dtype="auto",
229
- device_map="auto",
230
- )
231
- llamaModel.eval()
232
-
233
- return [gr.Button(visible=False), gr.Textbox(visible=True), gr.Button(visible=True)]
234
- except Exception as e:
235
- raise gr.Error(e)
236
-
237
- async def chat(message):
238
- try:
239
- async with Translator() as translator:
240
- translated_input = await translator.translate(message, dest="ja")
241
- jp_input = translated_input.text
242
- output_language = translated_input.src
243
-
244
- global memory
245
-
246
- memory.append({"role": "user", "content": jp_input})
247
-
248
- prompt = llamaTokenizer.apply_chat_template(
249
- memory,
250
- tokenize=False,
251
- add_generation_prompt=True
252
- )
253
-
254
- token_ids = llamaTokenizer.encode(
255
- prompt, add_special_tokens=False, return_tensors="pt"
256
- )
257
-
258
- with torch.no_grad():
259
- output_ids = llamaModel.generate(
260
- token_ids.to(llamaModel.device),
261
- max_new_tokens=1024,
262
- do_sample=True,
263
- temperature=0.6,
264
- top_p=0.9,
265
- )
266
-
267
- output = llamaTokenizer.decode(
268
- output_ids.tolist()[0][token_ids.size(1):], skip_special_tokens=True
269
- )
270
-
271
- memory.append({"role": "assistant", "content": output})
272
-
273
- translated_output = await translator.translate(output, dest=output_language)
274
-
275
- if os.path.exists(f"./VRM-Emotions/model.safetensors"):
276
- newTokenizer = AutoTokenizer.from_pretrained("./VRM-Emotions", trust_remote_code=True)
277
- newConfig = AutoConfig.from_pretrained("./VRM-Emotions")
278
- newModel = AutoModelForSequenceClassification.from_pretrained("./VRM-Emotions", config=newConfig)
279
-
280
- sentence = newTokenizer(output, return_tensors="pt", padding=True, truncation=True, max_length=512)
281
- with torch.no_grad():
282
- outputs = newModel(**sentence)
283
- logits = outputs.logits
284
- predicted = torch.argmax(logits, dim=-1).item()
285
- label = newModel.config.id2label[predicted]
286
-
287
- return "[" + label + "] " + translated_output.text
288
- else:
289
- return translated_output.text
290
- except Exception as e:
291
- raise gr.Error(e)
292
-
293
  with gr.Blocks() as demo:
294
  with gr.Tab("Prepare Dataset"):
295
  dataset_files = gr.Files(label="CSV Files")
@@ -302,24 +223,4 @@ with gr.Blocks() as demo:
302
  train_btn = gr.Button("Train All")
303
  train_btn.click(train_model_async, inputs=None, outputs=[train_results, models_files])
304
 
305
- with gr.Tab("Testing"):
306
- chatbot = gr.Chatbot(type="messages")
307
- start = gr.Button("Start")
308
- msg = gr.Textbox(visible=False)
309
- clear = gr.Button("Clear", visible=False)
310
-
311
- async def user(history, message):
312
- return history + [{"role": "user", "content": message}], ""
313
-
314
- async def llm(history):
315
- message = next(
316
- (entry["content"] for entry in reversed(history) if entry["role"] == "user"), None
317
- )
318
- response = await chat(message)
319
- return history + [{"role": "assistant", "content": response}]
320
-
321
- start.click(init, None, [start, msg, clear])
322
- msg.submit(user, [chatbot, msg], [chatbot, msg], queue=False).then(llm, chatbot, chatbot)
323
- clear.click(reset_memory, None, chatbot, queue=False)
324
-
325
  demo.launch()
 
211
  except Exception as e:
212
  raise gr.Error(e)
213
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
214
  with gr.Blocks() as demo:
215
  with gr.Tab("Prepare Dataset"):
216
  dataset_files = gr.Files(label="CSV Files")
 
223
  train_btn = gr.Button("Train All")
224
  train_btn.click(train_model_async, inputs=None, outputs=[train_results, models_files])
225
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
226
  demo.launch()